// SPDX-License-Identifier: GPL-2.0
/*
 * sleep.c - x86-specific ACPI sleep support.
 *
 *  Copyright (C) 2001-2003 Patrick Mochel
 *  Copyright (C) 2001-2003 Pavel Machek <pavel@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/dmi.h>
#include <linux/cpumask.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/cacheflush.h>
#include <asm/realmode.h>
#include <asm/hypervisor.h>
#include <asm/smp.h>

#include <linux/ftrace.h>
#include "../../realmode/rm/wakeup.h"
#include "sleep.h"

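/*
 * Flags collected from the "acpi_sleep=" boot parameter (s3_bios, s3_mode,
 * s3_beep); handed to the real-mode wakeup code via header->realmode_flags.
 */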
unsigned long acpi_realmode_flags;

#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
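/*
 * Temporary stack used by the resuming CPU before its real stack is
 * restored; see the comment in x86_acpi_suspend_lowlevel().
 */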
static char temp_stack[4096];
#endif

/**
 * acpi_get_wakeup_address - provide physical address for S3 wakeup
 *
 * Returns the physical address where the kernel should be resumed after the
 * system awakes from S3, e.g. for programming into the firmware waking vector.
 */
unsigned long acpi_get_wakeup_address(void)
{
	return ((unsigned long)(real_mode_header->wakeup_start));
}

/**
 * x86_acpi_enter_sleep_state - enter sleep state
 * @state: Sleep state to enter.
 *
 * Wrapper around acpi_enter_sleep_state() to be called by assembly.
 */
asmlinkage acpi_status __visible x86_acpi_enter_sleep_state(u8 state)
{
	return acpi_enter_sleep_state(state);
}

/**
 * x86_acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int x86_acpi_suspend_lowlevel(void)
{
	struct wakeup_header *header =
		(struct wakeup_header *) __va(real_mode_header->wakeup_header);

	if (header->signature != WAKEUP_HEADER_SIGNATURE) {
		printk(KERN_ERR "wakeup header does not match\n");
		return -EINVAL;
	}

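	/* Boot-time video mode, passed to the real-mode wakeup code. */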
	header->video_mode = saved_video_mode;

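	/* No optional restore behaviors yet; bits are set below as each piece of state is saved. */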
	header->pmode_behavior = 0;

#ifndef CONFIG_64BIT
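	/* Save the current GDT descriptor for the wakeup code to reload on the 32-bit return path. */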
	native_store_gdt((struct desc_ptr *)&header->pmode_gdt);

	/*
	 * We have to check that we can write back the value, and not
	 * just read it.  At least on 90 nm Pentium M (Family 6, Model
	 * 13), reading an invalid MSR is not guaranteed to trap, see
	 * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
	 * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
	 * nm process with 512-KB L2 Cache Specification Update".
	 */
	if (!rdmsr_safe(MSR_EFER,
			&header->pmode_efer_low,
			&header->pmode_efer_high) &&
	    !wrmsr_safe(MSR_EFER,
			header->pmode_efer_low,
			header->pmode_efer_high))
		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */

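	/* Save CR0 unconditionally; CR4 only when CPUID is available, since pre-CPUID parts have no CR4. */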
	header->pmode_cr0 = read_cr0();
	if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
		header->pmode_cr4 = __read_cr4();
		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
	}
	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
			&header->pmode_misc_en_low,
			&header->pmode_misc_en_high) &&
	    !wrmsr_safe(MSR_IA32_MISC_ENABLE,
			header->pmode_misc_en_low,
			header->pmode_misc_en_high))
		header->pmode_behavior |=
			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
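	/* Pass the acpi_sleep= flags along and stamp the magic cookie the wakeup code sanity-checks. */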
	header->realmode_flags = acpi_realmode_flags;
	header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
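	/*
	 * 32-bit resume: re-enter the kernel through wakeup_pmode_return with
	 * initial_page_table as CR3; saved_magic lets the wakeup path verify
	 * the saved state on the way back.
	 */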
	header->pmode_entry = (u32)&wakeup_pmode_return;
	header->pmode_cr3 = (u32)__pa_symbol(initial_page_table);
	saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
	/*
	 * As each CPU starts up, it will find its own stack pointer
	 * from its current_task->thread.sp. Typically that will be
	 * the idle thread for a newly-started AP, or even the boot
	 * CPU which will find it set to &init_task in the static
	 * per-cpu data.
	 *
	 * Make the resuming CPU use the temporary stack at startup
	 * by setting current->thread.sp to point to that. The true
	 * %rsp will be restored with the rest of the CPU context,
	 * by do_suspend_lowlevel(). And unwinders don't care about
	 * the abuse of ->thread.sp because it's a dead variable
	 * while the thread is running on the CPU anyway; the true
	 * value is in the actual %rsp register.
	 */
	current->thread.sp = (unsigned long)temp_stack + sizeof(temp_stack);
	/*
	 * Ensure the CPU knows which one it is when it comes back, if
	 * it isn't in parallel mode and expected to work that out for
	 * itself.
	 */
	if (!(smpboot_control & STARTUP_PARALLEL_MASK))
		smpboot_control = smp_processor_id();
#endif
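	/*
	 * 64-bit resume: the CPU comes back up through the normal startup path
	 * and jumps to initial_code, pointed here at wakeup_long64, which
	 * checks saved_magic before restoring the saved context.
	 */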
	initial_code = (unsigned long)wakeup_long64;
	saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

	/*
	 * Pause/unpause graph tracing around do_suspend_lowlevel as it has
	 * inconsistent call/return info after it jumps to the wakeup vector.
	 */
	pause_graph_tracing();
	do_suspend_lowlevel();
	unpause_graph_tracing();
	return 0;
}

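/*
 * Parse the "acpi_sleep=" boot parameter: a comma-separated list of
 * s3_bios, s3_mode, s3_beep, s4_hwsig, s4_nohwsig, nonvs, nonvs_s3,
 * old_ordering and nobl.
 */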
static int __init acpi_sleep_setup(char *str)
{
	while ((str != NULL) && (*str != '\0')) {
		if (strncmp(str, "s3_bios", 7) == 0)
			acpi_realmode_flags |= 1;
		if (strncmp(str, "s3_mode", 7) == 0)
			acpi_realmode_flags |= 2;
		if (strncmp(str, "s3_beep", 7) == 0)
			acpi_realmode_flags |= 4;
#ifdef CONFIG_HIBERNATION
		if (strncmp(str, "s4_hwsig", 8) == 0)
			acpi_check_s4_hw_signature = 1;
		if (strncmp(str, "s4_nohwsig", 10) == 0)
			acpi_check_s4_hw_signature = 0;
#endif
		if (strncmp(str, "nonvs", 5) == 0)
			acpi_nvs_nosave();
		if (strncmp(str, "nonvs_s3", 8) == 0)
			acpi_nvs_nosave_s3();
		if (strncmp(str, "old_ordering", 12) == 0)
			acpi_old_suspend_ordering();
		if (strncmp(str, "nobl", 4) == 0)
			acpi_sleep_no_blacklist();
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
	return 1;
}

__setup("acpi_sleep=", acpi_sleep_setup);

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HYPERVISOR_GUEST)
static int __init init_s4_sigcheck(void)
{
	/*
	 * If running on a hypervisor, honour the ACPI specification
	 * by default and trigger a clean reboot when the hardware
	 * signature in FACS is changed after hibernation.
	 */
	if (acpi_check_s4_hw_signature == -1 &&
	    !hypervisor_is_type(X86_HYPER_NATIVE))
		acpi_check_s4_hw_signature = 1;

	return 0;
}
/* This must happen before acpi_init() which is a subsys initcall */
arch_initcall(init_s4_sigcheck);
#endif