/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif
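
/*
 * Note: under paravirt a guest cannot read %cr2 directly, so GET_CR2_INTO()
 * goes through the GET_CR2_INTO_RAX paravirt hook, which returns the value
 * in %rax; the non-paravirt variant is a plain control-register move.
 */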

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define p4d_index(x)	(((x) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
PGD_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
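
/*
 * The indices above select the pgd/pud slots for the direct mapping
 * (__PAGE_OFFSET_BASE) and the kernel text mapping (__START_KERNEL_map);
 * they are used below to position entries in init_top_pgt and
 * level3_kernel_pgt.
 */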

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded an identity-mapped page table
	 * for us.  These identity-mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity-mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:
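
	/*
	 * Both boot paths join here with %rax holding the link-time offset
	 * of the top-level page table (early_top_pgt or init_top_pgt) plus
	 * the SME encryption mask, if any; phys_base is added below to form
	 * the physical address loaded into CR3.
	 */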

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	orl	$X86_CR4_LA57, %ecx
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3
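
	/*
	 * The page tables just loaded must also map the code currently
	 * executing: identity mappings on the boot path (installed by
	 * __startup_64()), or the kernel mapping when arriving from
	 * trampoline.S at virtual addresses.
	 */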

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running.  We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot CPU uses
	 * the init data section until the per-CPU areas are set up.
	 */
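
	/*
	 * wrmsr takes the 64-bit MSR value split across %edx:%eax, which is
	 * why initial_gs is read as two 32-bit halves below.
	 */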
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* %rsi is a pointer to the real-mode structure with interesting
	   info; pass it to C */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offsets.
	 * Software Developer's Manual, Vol 2, states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16.
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorq	%rbp, %rbp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
ENDPROC(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack, which we set up here. Then we call
 * start_secondary() via .Ljump_to_C_code.
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	/*
	 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
	 * unwinder reliably detect the end of the stack.
	 */
	.quad  init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
	__FINITDATA

	__INIT
ENTRY(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
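
/*
 * Each stub above is padded with int3 (0xcc) bytes to a uniform stride of
 * EARLY_IDT_HANDLER_SIZE, so the handler for vector n can be computed as
 * early_idt_handler_array + n * EARLY_IDT_HANDLER_SIZE when the early IDT
 * is populated.
 */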

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_iret
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1:1 (identity) mapping PMD entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
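
/*
 * For illustration: PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 4) would emit
 * four 8-byte PMD entries mapping the first 4 * 2MB = 8MB with the given
 * permission bits (each entry covers 2^PMD_SHIFT = 2MB).
 */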

	__INITDATA
NEXT_PAGE(early_top_pgt)
	.fill	511,8,0
#ifdef CONFIG_X86_5LEVEL
	.quad	level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#else
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#endif

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_top_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + PGD_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + PGD_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

#ifdef CONFIG_X86_5LEVEL
NEXT_PAGE(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
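
	/*
	 * With the 512MB mapping described above this expands to
	 * KERNEL_IMAGE_SIZE/PMD_SIZE = 512MB / 2MB = 256 entries; the
	 * page alignment of the next table pads the rest of the page.
	 */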

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0
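
	/* Sanity check: 506 + 1 + 5 = 512 entries, exactly one page. */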

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
