xref: /openbmc/linux/arch/x86/kernel/acpi/wakeup_64.S (revision 8795a739)
/* SPDX-License-Identifier: GPL-2.0-only */
.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>
#include <asm/frame.h>

# Copyright 2003 Pavel Machek <pavel@suse.cz>

.code64
	/*
	 * Hooray, we are in Long 64-bit mode (but still running in low memory)
	 */
ENTRY(wakeup_long64)
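	/*
	 * Sanity check: saved_magic is written on the suspend path (see
	 * x86_acpi_suspend_lowlevel()) and must survive the trip through
	 * firmware; anything else means the saved image is not ours.
	 */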
	movq	saved_magic, %rax
	movq	$0x123456789abcdef0, %rdx
	cmpq	%rdx, %rax
	je	2f

	/*
	 * Stop here on a saved_magic mismatch; 0x6d61676963 is "magic" in
	 * ASCII, so the bogus %rcx value is easy to spot in a register dump.
	 */
	movq	$0xbad6d61676963, %rcx
1:
	jmp 1b
2:
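	/* Reload the data segment registers with the kernel data selector */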
	movw	$__KERNEL_DS, %ax
	movw	%ax, %ss
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
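	/* Restore the stack and registers stashed by do_suspend_lowlevel() */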
	movq	saved_rsp, %rsp

	movq	saved_rbx, %rbx
	movq	saved_rdi, %rdi
	movq	saved_rsi, %rsi
	movq	saved_rbp, %rbp

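	/* Resume at .Lresume_point, recorded in saved_rip before sleeping */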
	movq	saved_rip, %rax
	jmp	*%rax
ENDPROC(wakeup_long64)

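/*
 * Save the processor state, enter ACPI S3, and come back out at
 * .Lresume_point once wakeup_long64() has rebuilt a usable environment.
 */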
ENTRY(do_suspend_lowlevel)
	FRAME_BEGIN
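	/* Carve out 8 bytes of stack; released by the addq $8 below */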
	subq	$8, %rsp
	xorl	%eax, %eax
	call	save_processor_state

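	/* Snapshot the general-purpose registers into struct saved_context */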
	movq	$saved_context, %rax
	movq	%rsp, pt_regs_sp(%rax)
	movq	%rbp, pt_regs_bp(%rax)
	movq	%rsi, pt_regs_si(%rax)
	movq	%rdi, pt_regs_di(%rax)
	movq	%rbx, pt_regs_bx(%rax)
	movq	%rcx, pt_regs_cx(%rax)
	movq	%rdx, pt_regs_dx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
	pushfq
	popq	pt_regs_flags(%rax)

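	/* Record the resume address and the registers wakeup_long64() reloads */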
	movq	$.Lresume_point, saved_rip(%rip)

	movq	%rsp, saved_rsp
	movq	%rbp, saved_rbp
	movq	%rbx, saved_rbx
	movq	%rdi, saved_rdi
	movq	%rsi, saved_rsi

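	/*
	 * Drop the scratch slot and enter ACPI S3. On success the call does
	 * not return; execution resumes in wakeup_long64() via the wakeup
	 * vector and jumps back to .Lresume_point.
	 */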
	addq	$8, %rsp
	movl	$3, %edi
	xorl	%eax, %eax
	call	x86_acpi_enter_sleep_state
	/* In case something went wrong, restore the machine state and continue */
	jmp	.Lresume_point

	.align 4
.Lresume_point:
	/* We don't restore %rax; it must be 0 anyway */
	movq	$saved_context, %rax
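	/* Restore the control registers saved by save_processor_state() */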
	movq	saved_context_cr4(%rax), %rbx
	movq	%rbx, %cr4
	movq	saved_context_cr3(%rax), %rbx
	movq	%rbx, %cr3
	movq	saved_context_cr2(%rax), %rbx
	movq	%rbx, %cr2
	movq	saved_context_cr0(%rax), %rbx
	movq	%rbx, %cr0
	pushq	pt_regs_flags(%rax)
	popfq
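	/* Reload the general-purpose registers from the pt_regs snapshot */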
	movq	pt_regs_sp(%rax), %rsp
	movq	pt_regs_bp(%rax), %rbp
	movq	pt_regs_si(%rax), %rsi
	movq	pt_regs_di(%rax), %rdi
	movq	pt_regs_bx(%rax), %rbx
	movq	pt_regs_cx(%rax), %rcx
	movq	pt_regs_dx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15

#ifdef CONFIG_KASAN
	/*
	 * The suspend path may have poisoned some areas deeper in the stack,
	 * which we now need to unpoison.
	 */
	movq	%rsp, %rdi
	call	kasan_unpoison_task_stack_below
#endif

	xorl	%eax, %eax
	addq	$8, %rsp
	FRAME_END
	jmp	restore_processor_state
ENDPROC(do_suspend_lowlevel)

.data
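/* Slots used to pass state from do_suspend_lowlevel() to wakeup_long64() */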
saved_rbp:		.quad	0
saved_rsi:		.quad	0
saved_rdi:		.quad	0
saved_rbx:		.quad	0

saved_rip:		.quad	0
saved_rsp:		.quad	0

ENTRY(saved_magic)	.quad	0