// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pgtable.h>

#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
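/* The register state saved by save_processor_state() across suspend/hibernation. */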
struct saved_context saved_context;

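/*
 * Read each MSR in the saved_msrs list with rdmsrl_safe() and record
 * whether the read succeeded, so that msr_restore_context() only writes
 * back values that were actually read.
 */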
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
		msr++;
	}
}

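/*
 * Write back every MSR that msr_save_context() successfully read; MSRs
 * whose save failed (msr->valid == false) are skipped.
 */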
static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the register contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operation of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32_LAZY_GS
	savesegment(gs, ctxt->gs);
#endif
#ifdef CONFIG_X86_64
	savesegment(gs, ctxt->gs);
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

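/*
 * Reload the descriptor-table state (TSS, LDT, fixmap GDT) and the MSRs
 * that depend on it after the low-level resume code has run.
 */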
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available" first.
	 *
	 * XXX: This could probably all be replaced by a call to
	 * force_reload_TR().
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91). */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif
	load_TR_desc();				/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *                             by __save_processor_state()
 * @ctxt: structure to load the register contents from
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	struct cpuinfo_x86 *c;

	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access.  Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->es);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases.  Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#elif defined(CONFIG_X86_32_LAZY_GS)
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
	msr_restore_context(ctxt);

	c = &cpu_data(smp_processor_id());
	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
		init_ia32_feat_ctl(c);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
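/*
 * Park the CPU in HLT rather than MONITOR/MWAIT, so that restoring the
 * hibernation image cannot wake it up by writing to a monitored address
 * (see the comment in hibernate_resume_nonboot_cpu_disable() below).
 */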
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will actually be written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may no longer be possible to
	 * resolve at that point (the page tables it used previously may have
	 * been overwritten by hibernate image data).
	 *
	 * First, make sure that we wake up all the potentially disabled SMT
	 * threads which have been initially brought up and then put into
	 * mwait/cpuidle sleep.  Those will be put into proper (not interfering
	 * with hibernation resume) sleep afterwards, and the resumed kernel
	 * will decide for itself what to do with them.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;
	smp_ops.play_dead = resume_play_dead;
	ret = freeze_secondary_cpus(0);
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

/*
 * When bsp_check() is called during hibernation or suspend, CPU hotplug
 * is already disabled, so there is no need to handle a race between the
 * cpumask query and CPU hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0
		 * because:
		 * 1. it's required for resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares the snapshot device during boot time, so we just
		 * call _debug_hotplug_cpu() to restore CPU0's state prior to
		 * preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user
		 * may see a different CPU0 state before and after accessing
		 * the snapshot device.  But hopefully this is not the case when
		 * the user is debugging CPU0 hotplug.  Even if users hit this
		 * case, they can easily bring CPU0 back online.
		 *
		 * To simplify this debug code, we only consider the normal boot
		 * case.  Otherwise we would need to remember CPU0's state,
		 * restore to that state and resolve race conditions etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is called
	 * first and disables CPU hotplug before the BSP online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

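/*
 * Append @num MSR ids from @msr_id to saved_context.saved_msrs, growing
 * the array and preserving entries queued by earlier callers.  The MSRs
 * themselves are read later, by msr_save_context().
 */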
static int msr_build_context(const u32 *msr_id, const int num)
{
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
	int total_num;
	int i, j;

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/*
		 * Multiple callbacks can invoke this function, so copy any
		 * MSR save requests from previous invocations.
		 */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);

		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		msr_array[i].info.msr_no	= msr_id[j];
		msr_array[i].valid		= false;
		msr_array[i].info.reg.q		= 0;
	}
	saved_msrs->num   = total_num;
	saved_msrs->array = msr_array;

	return 0;
}

/*
 * The following sections are a quirk framework for problematic BIOSen:
 * Sometimes MSRs are modified by the BIOS after the system is suspended to
 * RAM, which might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspend.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
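
/*
 * A further quirk would follow the same pattern.  As a sketch (the
 * platform name and MSR list below are invented for illustration),
 * the callback would look like:
 *
 *	static int msr_initialize_foo(const struct dmi_system_id *d)
 *	{
 *		u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };
 *
 *		pr_info("x86/pm: %s detected, MSR saving is needed during suspend.\n",
 *			d->ident);
 *		return msr_build_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
 *	}
 *
 * together with a matching entry in msr_save_dmi_table below.
 */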

static const struct dmi_system_id msr_save_dmi_table[] = {
	{
	 .callback = msr_initialize_bdw,
	 .ident = "BROADWELL BDX_EP",
	 .matches = {
		DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
		DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};

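/*
 * MSR_AMD64_CPUID_FN_1 holds the BIOS override of CPUID Fn0000_0001
 * feature bits (notably the RDRAND bit adjustment on these parts);
 * save it so the override survives suspend/resume.
 */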
static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspend.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}

static const struct x86_cpu_id msr_save_cpu_table[] = {
	X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
	X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
	{}
};

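/*
 * Match the running CPU against msr_save_cpu_table and, on a match, run
 * the quirk callback stashed in the entry's driver_data.
 */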
typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
static int pm_cpu_check(const struct x86_cpu_id *c)
{
	const struct x86_cpu_id *m;
	int ret = 0;

	m = x86_match_cpu(msr_save_cpu_table);
	if (m) {
		pm_cpu_match_t fn;

		fn = (pm_cpu_match_t)m->driver_data;
		ret = fn(m);
	}

	return ret;
}

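/* Apply the DMI- and CPU-model-based MSR save quirks at boot. */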
static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);

	return 0;
}

device_initcall(pm_check_save_msr);
515