xref: /openbmc/linux/arch/x86/include/asm/suspend_64.h (revision 0d4bb5e4)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2001-2003 Pavel Machek <pavel@suse.cz>
 * Based on code
 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
 */
#ifndef _ASM_X86_SUSPEND_64_H
#define _ASM_X86_SUSPEND_64_H

#include <asm/desc.h>
#include <asm/fpu/api.h>

/*
 * Image of the saved processor state, used by the low level ACPI suspend to
 * RAM code and by the low level hibernation code.
 *
 * If you modify it, check how it is used in arch/x86/kernel/acpi/wakeup_64.S
 * and make sure that __save/__restore_processor_state(), defined in
 * arch/x86/power/cpu.c, still work as required. An illustrative sketch of how
 * these fields get filled in follows the structure definition below.
 *
 * Because the structure is packed, make sure to avoid unaligned members, both
 * for optimisation purposes and because tools like kmemleak only search for
 * pointers that are aligned.
 */
struct saved_context {
	struct pt_regs regs;

	/*
	 * User CS and SS are saved in current_pt_regs().  The rest of the
	 * segment selectors need to be saved and restored here.
	 */
	u16 ds, es, fs, gs;

	/*
	 * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
	 * so we save them separately.  We save the kernelmode GSBASE to
	 * restore percpu access after resume.
	 */
	unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;

	unsigned long cr0, cr2, cr3, cr4;
	u64 misc_enable;
	struct saved_msrs saved_msrs;
	unsigned long efer;
	u16 gdt_pad; /* Unused */
	struct desc_ptr gdt_desc;
	u16 idt_pad; /* Unused */
	struct desc_ptr idt;
	u16 ldt;
	u16 tss;
	unsigned long tr;
	unsigned long safety;
	unsigned long return_address;
	bool misc_enable_saved;
} __attribute__((packed));
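
/*
 * Illustrative only: a simplified sketch of how a save routine along the
 * lines of __save_processor_state() in arch/x86/power/cpu.c fills a subset
 * of the fields above. It is guarded out so it is never compiled, the helper
 * name example_save_context() is hypothetical, and the real function saves
 * additional state (e.g. the GDT descriptor and the saved_msrs list).
 */
#if 0
#include <asm/msr.h>		/* rdmsrl(), rdmsrl_safe() */
#include <asm/segment.h>	/* savesegment() */
#include <asm/special_insns.h>	/* read_cr0() and friends */

static void example_save_context(struct saved_context *ctxt)
{
	/* Descriptor table and task register. */
	store_idt(&ctxt->idt);
	store_tr(ctxt->tr);

	/* Segment selectors that are not covered by current_pt_regs(). */
	savesegment(gs, ctxt->gs);
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	/* Segment base MSRs; kernelmode GSBASE restores percpu access. */
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	rdmsrl(MSR_EFER, ctxt->efer);

	/* Control registers. */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();

	/* MISC_ENABLE may not be present on all CPUs, so probe it safely. */
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
}
#endif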

/*
 * Reload hardware debug register <register> from the copy saved in the given
 * thread structure; the member name is formed by token pasting.
 */
#define loaddebug(thread,register) \
	set_debugreg((thread)->debugreg##register, register)
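
/*
 * Expansion example (illustrative; 'tsk' is a hypothetical task pointer whose
 * thread struct has a debugreg7 member):
 *
 *	loaddebug(&tsk->thread, 7);
 *
 * expands to
 *
 *	set_debugreg((&tsk->thread)->debugreg7, 7);
 *
 * i.e. DR7 is reloaded from the value previously saved in the thread struct.
 */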

/*
 * Routines for saving/restoring kernel state, implemented in
 * arch/x86/power/hibernate_asm_64.S.
 */
extern char core_restore_code[];
extern char restore_registers[];

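/*
 * Illustrative only: a simplified sketch of how the hibernation resume path
 * uses core_restore_code, roughly following relocate_restore_code() in
 * arch/x86/power/hibernate_64.c (arch/x86/power/hibernate.c in newer trees).
 * The real function also fixes up page table permissions so the copied page
 * is executable; the helper name example_relocate_restore_code() and the
 * error handling shown here are schematic.
 */
#if 0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/suspend.h>	/* get_safe_page() */

static unsigned long relocated_restore_code;

static int example_relocate_restore_code(void)
{
	/* Copy the position-independent restore routine to a safe page that
	 * will not be overwritten while the image pages are being restored. */
	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

	/* The assembly in hibernate_asm_64.S later jumps to this copy, which
	 * in turn ends up in restore_registers() to reload the saved state. */
	return 0;
}
#endif
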
#endif /* _ASM_X86_SUSPEND_64_H */