/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

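/*
 * Worked example (a sketch; assumes the common layout of this era,
 * __PAGE_OFFSET = 0xffff880000000000 and __START_KERNEL_map =
 * 0xffffffff80000000, both of which are configuration-dependent):
 *
 *   L4_PAGE_OFFSET  = (0xffff880000000000 >> 39) & 511 = 272
 *   L4_START_KERNEL = (0xffffffff80000000 >> 39) & 511 = 511
 *   L3_START_KERNEL = (0xffffffff80000000 >> 30) & 511 = 510
 *
 * i.e. the kernel text mapping lives in the last pgd slot and the
 * second-to-last pud slot, matching the 511 and 510 entries used in
 * the page-table fixups and definitions below.
 */
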
	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

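	/*
	 * A worked example of the delta (a sketch; 0x1000000 is just the
	 * common CONFIG_PHYSICAL_START default, not guaranteed): _text is
	 * linked at __START_KERNEL_map + 0x1000000, so the subq removes the
	 * compiled-in physical address 0x1000000.  If the loader actually
	 * placed us at physical 0x5000000, the rip-relative leaq yields
	 * 0x5000000 and %rbp becomes the relocation delta 0x4000000, which
	 * is added to every address patched below.
	 */
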
	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? The early page tables only cover
	 * the first PGDIR_SIZE (512 GB) of physical memory. */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fix up the physical addresses in the page tables. */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
	addq	%rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	/* Point our pud slot at the spare pmd page (physical address
	 * plus _KERNPG_TABLE flags) */
	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	/* Fill the matching pmd slot with a 2M executable large-page
	 * entry for our (2M-aligned) load address */
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

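	/*
	 * Net effect (a sketch): everything below 1G is already identity
	 * mapped via level2_ident_pgt, so the code above only has to add
	 * a single extra 2M identity mapping, covering _text, when the
	 * kernel was loaded at or above 1G.
	 */
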
	/*
	 * Fix up the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */

	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next entry */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	/* Fixup trampoline */
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better to take a jmp than to rely on the empty space
	 * being filled with 0x90 (nop).
	 */
	jmp secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
	movq	%rax, %cr4

	/* Set up the early boot 4-level page tables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

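	/*
	 * Note on the arithmetic above: "init_level4_pgt -
	 * __START_KERNEL_map" is the physical address the pgd was compiled
	 * for, and phys_base holds the relocation delta patched in by
	 * startup_64 (zero if we were not relocated), so %rax ends up as
	 * the true physical address that %cr3 requires.
	 */
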
	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

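	/*
	 * The "$1f" immediate above is the link-time (high, virtual)
	 * address of label 1, so the indirect jmp moves %rip out of any
	 * identity mapping and into the __START_KERNEL_map mapping; a
	 * rip-relative jump would have stayed in the low mapping.
	 */
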
	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Set up EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

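	/*
	 * Bit 20 of CPUID.80000001H:%edx (saved in %edi above) is the NX
	 * feature flag, which is why bit 20 is the one tested before
	 * setting EFER.NX.
	 */
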
	/* Set up %cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Set up a boot-time stack */
	movq stack_start(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we are currently running at. We have to do that here
	 * because in 32-bit mode we couldn't load a 64-bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * the init data section until the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

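	/*
	 * wrmsr takes the 64-bit value in %edx:%eax, so the two movl's
	 * above load the low and high halves of the 64-bit initial_gs
	 * quadword into MSR_GS_BASE.
	 */
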
	/* %esi is a pointer to the real-mode structure with interesting
	 * info; pass it to C. */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set
	 * correctly, so we make this a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

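	/*
	 * Stack picture just before the lretq above:
	 *
	 *    (%rsp) -> initial_code value   (popped into %rip)
	 *   8(%rsp) -> __KERNEL_CS          (popped into %cs)
	 *  16(%rsp) -> 0                    (left as fake return address)
	 *
	 * lretq pops %rip and then %cs, so a single instruction both
	 * reloads the code segment and transfers to the 64-bit target.
	 */
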
	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	ENTRY(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	ENTRY(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0
	__FINITDATA

bad_address:
	jmp bad_address

	.section ".init.text","ax"
	.globl early_idt_handlers
early_idt_handlers:
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
	ASM_NOP2
	.else
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler
	i = i + 1
	.endr

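	#
	# A sizing note (a sketch; encodings assumed): ASM_NOP2 and
	# "pushq $0" are each 2 bytes, "pushq $i" is 2 bytes for i < 128,
	# and "jmp early_idt_handler" is 5 bytes as a rel32 jump, so every
	# stub is 2+2+5 = 9 bytes.  That uniform size is what lets the C
	# side index stub i at early_idt_handlers + i*9 (assuming the
	# matching [NUM_EXCEPTION_VECTORS][2+2+5] array declaration).
	#
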
ENTRY(early_idt_handler)
	cld

	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 10f

	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

10:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 88(%rsp),%rsi	# %rip again (88(%rsp), not 40(%rsp): per the
				# frame layout above, 40 holds the saved %rsi)
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	addq $16,%rsp		# drop vector number and error code
	decl early_recursion_flag(%rip)
	INTERRUPT_RETURN

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
	.previous

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1-to-1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

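/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 * below expands to 512 .quad directives:
 *
 *   .quad 0x00000000 + PERM
 *   .quad 0x00200000 + PERM
 *   ...
 *   .quad 0x3fe00000 + PERM
 *
 * i.e. 512 consecutive 2M large-page entries covering the first 1G.
 */
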
	.data
	/*
	 * This default setting generates an identity mapping at address
	 * 0x100000 and a mapping for the kernel that precisely maps virtual
	 * address 0xffffffff80000000 to physical address 0x000000 (always
	 * using 2MB large pages provided by PAE mode).
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

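/*
 * With the usual KERNEL_IMAGE_SIZE of 512 MB (an assumption; it is a
 * config-derived constant) and PMD_SIZE of 2 MB, the PMDS() above
 * emits 256 entries, leaving the rest of this page-sized table zero.
 */
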
NEXT_PAGE(level2_spare_pgt)
	.fill   512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

#include "../../x86/xen/xen-head.S"

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip IDT_ENTRIES * 16

	.align L1_CACHE_BYTES
ENTRY(nmi_idt_table)
	.skip IDT_ENTRIES * 16

	__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE