xref: /openbmc/linux/arch/x86/power/cpu.c (revision 68198dca)
/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <linux/dmi.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;

static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		/* rdmsrl_safe() returns 0 on success, so 'valid' is set only if the read succeeded. */
		msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
		msr++;
	}
}

static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}
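
/*
 * A minimal sketch of how this save/restore pair is used.  The MSR list
 * below is hypothetical; in this file the array is actually populated by
 * msr_init_context(), defined further down, via the DMI quirk table:
 *
 *	static const u32 example_msrs[] = { MSR_IA32_THERM_CONTROL };
 *
 *	msr_init_context(example_msrs, ARRAY_SIZE(example_msrs));
 *	...
 *	msr_save_context(&saved_context);	// before suspend
 *	// firmware may clobber the listed MSRs while suspended
 *	msr_restore_context(&saved_context);	// after resume
 */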

/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt: structure to store the register contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (i.e. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (i.e. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and a different
 *	kernel B is used for loading the hibernation image into memory, then
 *	kernel A's __save_processor_state() must save all the registers needed
 *	by kernel A, so that it can operate correctly after the resume,
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	/* Paired with the kernel_fpu_end() in do_fpu_end() on the restore path. */
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32_LAZY_GS
	savesegment(gs, ctxt->gs);
#endif
#ifdef CONFIG_X86_64
	savesegment(gs, ctxt->gs);
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
#ifdef CONFIG_X86_64
	ctxt->cr8 = read_cr8();
#endif
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif
	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; it should not be
				 * necessary.  But... this is necessary, because
				 * 386 hardware has the concept of a busy TSS
				 * or some similar stupidity.
				 */

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	/*
	 * Mark the TSS available again: the ltr in load_TR_desc() below
	 * would fault on a TSS descriptor left marked busy.
	 */
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif
	load_TR_desc();				/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *                             by __save_processor_state()
 * @ctxt: structure to load the register contents from
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access.  Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->ds);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases.  Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#elif defined(CONFIG_X86_32_LAZY_GS)
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
	msr_restore_context(ctxt);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will actually be written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, while the
	 * address in its instruction pointer may no longer be resolvable at
	 * that point (the page tables it used previously may have been
	 * overwritten by hibernate image data).
	 */
	smp_ops.play_dead = resume_play_dead;
	ret = disable_nonboot_cpus();
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is already disabled, so it's unnecessary to handle the race between
 * the cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0
		 * because
		 * 1. it's required for resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a real resume happens, this code won't be called.
		 *
		 * It is called only when user space hibernation software
		 * prepares the snapshot device during boot time, so we just
		 * call _debug_hotplug_cpu() to restore CPU0 to its state
		 * prior to preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug
		 * debug mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the
		 * snapshot device after boot time, this will offline CPU0
		 * and the user may see a different CPU0 state before and
		 * after accessing the snapshot device.  But hopefully that
		 * is not the case when the user is debugging CPU0 hotplug.
		 * Even if users hit this case, they can easily online CPU0
		 * again.
		 *
		 * To simplify this debug code, we only consider the normal
		 * boot case.  Otherwise we would need to remember CPU0's
		 * state, restore that state and resolve race conditions etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is called
	 * first and disables cpu hotplug before the BSP online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

static int msr_init_context(const u32 *msr_id, const int total_num)
{
	int i = 0;
	struct saved_msr *msr_array;

	if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
		pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
		return -EINVAL;
	}

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Cannot allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	for (i = 0; i < total_num; i++) {
		msr_array[i].info.msr_no	= msr_id[i];
		msr_array[i].valid		= false;
		msr_array[i].info.reg.q		= 0;
	}
	saved_context.saved_msrs.num	= total_num;
	saved_context.saved_msrs.array	= msr_array;

	return 0;
}

/*
 * The following section is a quirk framework for problematic BIOSen:
 * Sometimes MSRs are modified by the BIOSen after the system is suspended
 * to RAM, and this may cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw (a sketch
 * follows the DMI table below).
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspend.\n", d->ident);
	return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}

static const struct dmi_system_id msr_save_dmi_table[] = {
	{
	 .callback = msr_initialize_bdw,
	 .ident = "BROADWELL BDX_EP",
	 .matches = {
		DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
		DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};
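
/*
 * A minimal sketch of hooking up a new quirk, assuming a hypothetical
 * platform; the ident, DMI strings and MSR choice below are illustrative
 * only, not a real match:
 *
 *	static int msr_initialize_foo(const struct dmi_system_id *d)
 *	{
 *		u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };
 *
 *		pr_info("x86/pm: %s detected, MSR saving is needed during suspend.\n",
 *			d->ident);
 *		return msr_init_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
 *	}
 *
 * plus a matching entry added before the terminating {} of
 * msr_save_dmi_table:
 *
 *	{
 *	 .callback = msr_initialize_foo,
 *	 .ident = "HYPOTHETICAL FOO PLATFORM",
 *	 .matches = {
 *		DMI_MATCH(DMI_PRODUCT_NAME, "FOO-BOARD"),
 *		},
 *	},
 */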

static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	return 0;
}

device_initcall(pm_check_save_msr);
454