/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * relocate_kernel.S - put the kernel image in place to boot
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/kexec.h>
#include <asm/processor-flags.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>

/*
 * Must be relocatable PIC code callable as a C function; in particular
 * there must be a plain RET, not a jump to the return thunk.
 */

#define PTR(x) (x << 3)
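/*
 * The page_list handed to relocate_kernel() in %rsi is an array of
 * 8-byte entries, so PTR(n) converts an entry index into a byte
 * offset: PTR(VA_CONTROL_PAGE)(%rsi) below, for example, loads the
 * VA_CONTROL_PAGE'th entry of the list.
 */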
#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * The area from control_page + KEXEC_CONTROL_CODE_MAX_SIZE up to
 * control_page + PAGE_SIZE is used as data storage and as the stack
 * for jumping back.
 */
#define DATA(offset)		(KEXEC_CONTROL_CODE_MAX_SIZE+(offset))

/* Minimal CPU state */
#define RSP			DATA(0x0)
#define CR0			DATA(0x8)
#define CR3			DATA(0x10)
#define CR4			DATA(0x18)

/* other data */
#define CP_PA_TABLE_PAGE	DATA(0x20)
#define CP_PA_SWAP_PAGE		DATA(0x28)
#define CP_PA_BACKUP_PAGES_MAP	DATA(0x30)
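
/*
 * Resulting control page layout, a sketch derived from the DATA()
 * offsets above and the stack setup below:
 *
 *   0x0                          .. relocation code (this file)
 *   KEXEC_CONTROL_CODE_MAX_SIZE  .. saved RSP/CR0/CR3/CR4, CP_* data
 *   remainder up to PAGE_SIZE    .. stack, growing down from the top
 */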

	.text
	.align PAGE_SIZE
	.code64
SYM_CODE_START_NOALIGN(relocate_kernel)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR
	/*
	 * %rdi indirection_page
	 * %rsi page_list
	 * %rdx start address
	 * %rcx preserve_context
	 * %r8  host_mem_enc_active
	 */

	/* Save the CPU context, used for jumping back */
	pushq %rbx
	pushq %rbp
	pushq %r12
	pushq %r13
	pushq %r14
	pushq %r15
	pushf

	movq	PTR(VA_CONTROL_PAGE)(%rsi), %r11
	movq	%rsp, RSP(%r11)
	movq	%cr0, %rax
	movq	%rax, CR0(%r11)
	movq	%cr3, %rax
	movq	%rax, CR3(%r11)
	movq	%cr4, %rax
	movq	%rax, CR4(%r11)

	/* Save CR4. Required to enable the right paging mode later. */
	movq	%rax, %r13

	/* zero out flags, and disable interrupts */
	pushq $0
	popfq
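
	/*
	 * Note that this also clears the direction flag, which the
	 * rep ; movsq copies in swap_pages below rely on.
	 */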

	/* Save SME active flag */
	movq	%r8, %r12

	/*
	 * Get the physical address of the control page now;
	 * this is impossible after the page table switch.
	 */
	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8

	/* get physical address of page table now too */
	movq	PTR(PA_TABLE_PAGE)(%rsi), %r9

	/* get physical address of swap page now */
	movq	PTR(PA_SWAP_PAGE)(%rsi), %r10

	/* save some information for jumping back */
	movq	%r9, CP_PA_TABLE_PAGE(%r11)
	movq	%r10, CP_PA_SWAP_PAGE(%r11)
	movq	%rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
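	/*
	 * (Despite its historical name, CP_PA_BACKUP_PAGES_MAP holds the
	 * indirection page passed in %rdi; it is reloaded into %rdi for
	 * the second swap_pages call on the way back.)
	 */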

	/* Switch to the identity mapped page tables */
	movq	%r9, %cr3

	/* setup a new stack at the end of the physical control page */
	lea	PAGE_SIZE(%r8), %rsp

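	/*
	 * The jump below is done with a push + RET rather than an
	 * indirect JMP so that no return thunk or retpoline is
	 * involved; ANNOTATE_UNRET_SAFE tells objtool the plain RET
	 * is intentional.
	 */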
	/* jump to identity mapped page */
	addq	$(identity_mapped - relocate_kernel), %r8
	pushq	%r8
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(relocate_kernel)

SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	UNWIND_HINT_EMPTY
	/* set return address to 0 if not preserving context */
	pushq	$0
	/* store the start address on the stack */
	pushq	%rdx

	/*
	 * Clear X86_CR4_CET (if it was set) so that X86_CR0_WP can be
	 * cleared below.
	 */
	movq	%cr4, %rax
	andq	$~(X86_CR4_CET), %rax
	movq	%rax, %cr4

	/*
	 * Set cr0 to a known state:
	 *  - Paging enabled
	 *  - Alignment check disabled
	 *  - Write protect disabled
	 *  - No task switch
	 *  - Don't do FP software emulation.
	 *  - Protected mode enabled
	 */
	movq	%cr0, %rax
	andq	$~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
	orl	$(X86_CR0_PG | X86_CR0_PE), %eax
	movq	%rax, %cr0

	/*
	 * Set cr4 to a known state:
	 *  - physical address extension enabled
	 *  - 5-level paging, if it was enabled before
	 */
	movl	$X86_CR4_PAE, %eax
	testq	$X86_CR4_LA57, %r13
	jz	1f
	orl	$X86_CR4_LA57, %eax
1:
	movq	%rax, %cr4

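	/*
	 * The short forward jump is presumably here to discard any
	 * stale prefetched instructions after the CR4 write, a
	 * historical idiom; the jump itself is a no-op.
	 */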
	jmp 1f
1:

	/* Flush the TLB (needed?) */
	movq	%r9, %cr3

	/*
	 * If SME is active, there could be old encrypted cache line
	 * entries that will conflict with the now unencrypted memory
	 * used by kexec. Flush the caches before copying the kernel.
	 */
	testq	%r12, %r12
	jz 1f
	wbinvd
1:

	/* swap_pages clobbers %rcx, so stash preserve_context in %r11 */
	movq	%rcx, %r11
	call	swap_pages

	/*
	 * To be certain of avoiding problems with self-modifying code,
	 * a serializing instruction is needed here. Reloading %cr3
	 * serializes, flushes the TLB, and is not processor dependent,
	 * so it is used for that purpose.
	 */
	movq	%cr3, %rax
	movq	%rax, %cr3

	/*
	 * set all of the registers to known values
	 * leave %rsp alone
	 */

	/* if preserving context, skip ahead to call the new image */
	testq	%r11, %r11
	jnz 1f
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	xorl	%ecx, %ecx
	xorl	%edx, %edx
	xorl	%esi, %esi
	xorl	%edi, %edi
	xorl	%ebp, %ebp
	xorl	%r8d, %r8d
	xorl	%r9d, %r9d
	xorl	%r10d, %r10d
	xorl	%r11d, %r11d
	xorl	%r12d, %r12d
	xorl	%r13d, %r13d
	xorl	%r14d, %r14d
	xorl	%r15d, %r15d

	ANNOTATE_UNRET_SAFE
	ret
	int3

1:
	popq	%rdx
	/* use the swap page for the new image's initial stack */
	leaq	PAGE_SIZE(%r10), %rsp
	ANNOTATE_RETPOLINE_SAFE
	call	*%rdx

	/* get the re-entry point of the peer system */
	movq	0(%rsp), %rbp
	leaq	relocate_kernel(%rip), %r8
	movq	CP_PA_SWAP_PAGE(%r8), %r10
	movq	CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
	movq	CP_PA_TABLE_PAGE(%r8), %rax
	movq	%rax, %cr3
	lea	PAGE_SIZE(%r8), %rsp
	call	swap_pages
	movq	$virtual_mapped, %rax
	pushq	%rax
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(identity_mapped)
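
/*
 * Sketch of the preserve_context round trip implemented above:
 *
 *   relocate_kernel -> identity_mapped -> swap_pages -> call *%rdx
 *   (the jumped-to image runs) -> back here -> swap_pages again
 *   (undoing the first swap) -> virtual_mapped -> original caller
 *
 * The second swap_pages call restores every page the first call
 * moved, so the old kernel's memory image is intact when
 * virtual_mapped returns into it.
 */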

SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR // RET target, above
	movq	RSP(%r8), %rsp
	movq	CR4(%r8), %rax
	movq	%rax, %cr4
	movq	CR3(%r8), %rax
	movq	CR0(%r8), %r8
	movq	%rax, %cr3
	movq	%r8, %cr0
	movq	%rbp, %rax

	popf
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbp
	popq	%rbx
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(virtual_mapped)

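/*
 * The indirection list walked below is a chain of 8-byte entries,
 * each a physical address with kexec flags in its low bits:
 *
 *   0x1  IND_DESTINATION  set the destination page for copies
 *   0x2  IND_INDIRECTION  chain to the next indirection page
 *   0x4  IND_DONE         end of the list
 *   0x8  IND_SOURCE       copy this source page to the destination
 */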
	/* Do the copies */
SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
	UNWIND_HINT_EMPTY
	movq	%rdi, %rcx	/* Put the indirection_page in %rcx */
	xorl	%edi, %edi
	xorl	%esi, %esi
	jmp	1f

0:	/* top, read another word for the indirection page */

	movq	(%rbx), %rcx
	addq	$8,	%rbx
1:
	testb	$0x1,	%cl   /* is it a destination page? */
	jz	2f
	movq	%rcx,	%rdi
	andq	$0xfffffffffffff000, %rdi
	jmp	0b
2:
	testb	$0x2,	%cl   /* is it an indirection page? */
	jz	2f
	movq	%rcx,	%rbx
	andq	$0xfffffffffffff000, %rbx
	jmp	0b
2:
	testb	$0x4,	%cl   /* is it the done indicator? */
	jz	2f
	jmp	3f
2:
	testb	$0x8,	%cl   /* is it the source indicator? */
	jz	0b	      /* Ignore it otherwise */
	movq	%rcx,	%rsi  /* For every source page do a copy */
	andq	$0xfffffffffffff000, %rsi

	/* save the destination and source pages */
	movq	%rdi, %rdx
	movq	%rsi, %rax

	/* copy the source page to the swap page */
	movq	%r10, %rdi
	movl	$512, %ecx
	rep ; movsq

	/* copy the destination page to the source page */
	movq	%rax, %rdi
	movq	%rdx, %rsi
	movl	$512, %ecx
	rep ; movsq

	/* copy the swap page to the destination page */
	movq	%rdx, %rdi
	movq	%r10, %rsi
	movl	$512, %ecx
	rep ; movsq

	lea	PAGE_SIZE(%rax), %rsi
	jmp	0b
3:
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_CODE_END(swap_pages)
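
/*
 * kexec_control_code_size lets C code verify that this relocation
 * code fits below KEXEC_CONTROL_CODE_MAX_SIZE, keeping it clear of
 * the DATA() area at the top of the control page.
 */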
	.globl kexec_control_code_size
.set kexec_control_code_size, . - relocate_kernel