/* xref: /openbmc/linux/arch/x86/kernel/acpi/wakeup_64.S (revision b8d312aa) */
/* SPDX-License-Identifier: GPL-2.0-only */
.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>
#include <asm/frame.h>

# Copyright 2003 Pavel Machek <pavel@suse.cz>

.code64
	/*
	 * wakeup_long64 - 64-bit ACPI resume entry point.
	 *
	 * Jumped to after wakeup once the CPU is back in 64-bit long
	 * mode (but still running in low memory).  Sanity-checks
	 * saved_magic against the value written at suspend time,
	 * reloads the flat kernel data segments, restores the stack and
	 * the registers stashed in the saved_* variables by
	 * do_suspend_lowlevel, then jumps to saved_rip (which
	 * do_suspend_lowlevel set to .Lresume_point).
	 */
ENTRY(wakeup_long64)
	movq	saved_magic, %rax
	movq	$0x123456789abcdef0, %rdx
	cmpq	%rdx, %rax
	jne	bogus_64_magic		/* saved state is corrupt: hang */

	movw	$__KERNEL_DS, %ax	/* reload kernel data segments */
	movw	%ax, %ss
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movq	saved_rsp, %rsp		/* back onto the pre-suspend stack */

	movq	saved_rbx, %rbx
	movq	saved_rdi, %rdi
	movq	saved_rsi, %rsi
	movq	saved_rbp, %rbp

	movq	saved_rip, %rax		/* = .Lresume_point */
	jmp	*%rax
ENDPROC(wakeup_long64)

/*
 * saved_magic did not match the expected value: the saved suspend state
 * is unreliable, so there is nothing safe to resume into.  Park the CPU
 * in a tight loop.
 */
bogus_64_magic:
	jmp	bogus_64_magic

/*
 * do_suspend_lowlevel - save CPU context and enter ACPI sleep state S3.
 *
 * Saves the general-purpose registers and RFLAGS into saved_context
 * (offsets come from asm-offsets), mirrors a few registers plus the
 * stack pointer into the saved_* variables used by the wakeup
 * trampoline, records .Lresume_point in saved_rip, and calls
 * x86_acpi_enter_sleep_state(3).  On wakeup, wakeup_long64 jumps to
 * .Lresume_point, which restores CR4/CR3/CR2/CR0, RFLAGS and the
 * general-purpose registers, then tail-calls restore_processor_state
 * (which returns to our caller).  Returns 0 in %rax.
 */
ENTRY(do_suspend_lowlevel)
	FRAME_BEGIN
	subq	$8, %rsp		/* scratch slot, released by the
					 * matching addq $8 below — NOTE(review):
					 * presumably for stack alignment */
	xorl	%eax, %eax
	call	save_processor_state

	/* Save GPRs and flags into saved_context for the resume path. */
	movq	$saved_context, %rax
	movq	%rsp, pt_regs_sp(%rax)
	movq	%rbp, pt_regs_bp(%rax)
	movq	%rsi, pt_regs_si(%rax)
	movq	%rdi, pt_regs_di(%rax)
	movq	%rbx, pt_regs_bx(%rax)
	movq	%rcx, pt_regs_cx(%rax)
	movq	%rdx, pt_regs_dx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
	pushfq
	popq	pt_regs_flags(%rax)	/* RFLAGS, saved via the stack */

	/* Tell wakeup_long64 where to resume execution. */
	movq	$.Lresume_point, saved_rip(%rip)

	movq	%rsp, saved_rsp
	movq	%rbp, saved_rbp
	movq	%rbx, saved_rbx
	movq	%rdi, saved_rdi
	movq	%rsi, saved_rsi

	addq	$8, %rsp
	movl	$3, %edi		/* sleep state: S3 */
	xorl	%eax, %eax
	call	x86_acpi_enter_sleep_state
	/* in case something went wrong, restore the machine status and go on */
	jmp	.Lresume_point

	.align 4
.Lresume_point:
	/* We don't restore %rax, it must be 0 anyway */
	movq	$saved_context, %rax
	/* Control registers first, then flags, then GPRs. */
	movq	saved_context_cr4(%rax), %rbx
	movq	%rbx, %cr4
	movq	saved_context_cr3(%rax), %rbx
	movq	%rbx, %cr3
	movq	saved_context_cr2(%rax), %rbx
	movq	%rbx, %cr2
	movq	saved_context_cr0(%rax), %rbx
	movq	%rbx, %cr0
	pushq	pt_regs_flags(%rax)
	popfq
	movq	pt_regs_sp(%rax), %rsp
	movq	pt_regs_bp(%rax), %rbp
	movq	pt_regs_si(%rax), %rsi
	movq	pt_regs_di(%rax), %rdi
	movq	pt_regs_bx(%rax), %rbx
	movq	pt_regs_cx(%rax), %rcx
	movq	pt_regs_dx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15

#ifdef CONFIG_KASAN
	/*
	 * The suspend path may have poisoned some areas deeper in the stack,
	 * which we now need to unpoison.
	 */
	movq	%rsp, %rdi
	call	kasan_unpoison_task_stack_below
#endif

	xorl	%eax, %eax		/* return value: 0 */
	addq	$8, %rsp		/* drop the scratch slot from above */
	FRAME_END
	jmp	restore_processor_state	/* tail-call; its ret returns to caller */
ENDPROC(do_suspend_lowlevel)

.data
/*
 * Registers and stack pointer stashed by do_suspend_lowlevel and
 * reloaded by wakeup_long64 on resume.
 */
saved_rbp:		.quad	0
saved_rsi:		.quad	0
saved_rdi:		.quad	0
saved_rbx:		.quad	0

saved_rip:		.quad	0	/* resume address (.Lresume_point) */
saved_rsp:		.quad	0	/* pre-suspend stack pointer */

/*
 * Sanity-check value compared against 0x123456789abcdef0 in
 * wakeup_long64; global (exported via ENTRY) so it can be set from
 * outside this file.
 */
ENTRY(saved_magic)	.quad	0
