/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
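
/*
 * Worked example (a sketch, assuming the usual values of this era:
 * __PAGE_OFFSET = 0xffff880000000000, __START_KERNEL_map =
 * 0xffffffff80000000, PGDIR_SHIFT = 39, PUD_SHIFT = 30):
 *
 *	L4_PAGE_OFFSET  = (0xffff880000000000 >> 39) & 511 = 272
 *	L4_START_KERNEL = (0xffffffff80000000 >> 39) & 511 = 511
 *	L3_START_KERNEL = (0xffffffff80000000 >> 30) & 511 = 510
 *
 * i.e. the kernel text mapping lives in the last pgd slot and the
 * second-to-last pud slot, matching the .org offsets used for
 * init_level4_pgt and level3_kernel_pgt below.
 */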

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We come here only at initial boot; nothing else does.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
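
	/*
	 * Sketch of the math (example values only): _text -
	 * __START_KERNEL_map is the physical address the kernel was
	 * linked to run at, while the RIP-relative leaq yields the
	 * physical address it actually runs at, so %rbp ends up holding
	 * the relocation delta.  E.g. linked for physical 0x200000 but
	 * loaded at 0x1a00000 gives %rbp = 0x1800000.
	 */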

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fixup the physical addresses in the page table */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
	addq	%rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an identity mapping if I am above 1G */
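	/*
	 * (The static tables below identity-map only the first 1G; if
	 * _text sits above that, its pud slot in level3_ident_pgt is
	 * still empty, so hook in level2_spare_pgt and plug in a single
	 * 2M large-page entry covering _text.)
	 */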
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up, along with the mappings
	 * beyond _end.
	 */

	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next entry */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b
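	/*
	 * (The loop above visits all 4096/8 = 512 pmd entries of
	 * level2_kernel_pgt; only entries with the present bit, bit 0,
	 * set receive the relocation delta.)
	 */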

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_X86_TRAMPOLINE
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif

	/*
	 * Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better to take a jmp than to rely on the empty space
	 * being filled with 0x90 (nop).
	 */
	jmp secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:
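
	/*
	 * (movq $1f loads the label's link-time virtual address as a
	 * 64-bit immediate, so the indirect jmp moves %rip from the
	 * identity-mapped range into the __START_KERNEL_map range; a
	 * plain RIP-relative jump would have stayed put.)
	 */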

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */
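	/*
	 * (CPUID leaf 0x80000001 reports NX support in %edx bit 20;
	 * in EFER, SCE is bit 0 and NX is bit 11, which is what the
	 * _EFER_SCE and _EFER_NX bit numbers above expand to.)
	 */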

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq stack_start(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* Set up data segments; actually 0 would do too. */
	movl $__KERNEL_DS,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * init data section till per cpu areas are set up.
	 */
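	/*
	 * (wrmsr takes its 64-bit payload split across %edx:%eax, hence
	 * the shrq below to move the high half of initial_gs into %edx.)
	 */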
	movl	$MSR_GS_BASE,%ecx
	movq	initial_gs(%rip),%rax
	movq    %rax,%rdx
	shrq	$32,%rdx
	wrmsr

	/*
	 * %esi is a pointer to the real mode structure with interesting
	 * info; pass it to C.
	 */
	movl	%esi, %edi

	/*
	 * Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
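
	/*
	 * (Stack at the lretq, top first: target %rip, __KERNEL_CS, then
	 * the fake zero return address.  lretq pops %rip and %cs in one
	 * go, which an indirect jmp cannot do.)
	 */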

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	ENTRY(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	__FINITDATA

	ENTRY(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0

bad_address:
	jmp bad_address

	.section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
	.globl early_idt_handlers
early_idt_handlers:
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl $i, %esi
	jmp early_idt_handler
	i = i + 1
	.endr
#endif
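
	/*
	 * (The .rept above stamps out NUM_EXCEPTION_VECTORS small stubs,
	 * each loading its vector number into %esi before jumping to the
	 * shared early_idt_handler; that is how the common handler below
	 * learns which exception fired.)
	 */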

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq %rcx,%r9
	xorl %r8d,%r8d		# zero for error code
	movl %esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
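	# (0x27d00 has bits 8, 10-14 and 17 set: #DF, #TS, #NP, #SS,
	#  #GP, #PF and #AC, the exceptions that push an error code;
	#  for those the real error code is popped into %r8 below.)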
	cmpl $31,%ecx
	ja 0f
	movl $1,%eax
	salq %cl,%rax
	testl $0x27d00,%eax
	je 0f
	popq %r8		# get error code
0:	movq 0(%rsp),%rcx	# get ip
	movq 8(%rsp),%rdx	# get cs
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 0(%rsp),%rsi	# get rip again
	call __print_symbol
#endif
#endif /* CONFIG_EARLY_PRINTK */
1:	hlt
	jmp 1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long 0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
	.previous

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
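
/*
 * Worked expansion (a sketch): with PMD_SHIFT = 21, each entry covers
 * a 2M large page, so PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 3)
 * emits:
 *
 *	.quad 0x000000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0x200000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0x400000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 */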

	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
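
	/*
	 * (Assuming KERNEL_IMAGE_SIZE = 512M and PMD_SIZE = 2M, the PMDS
	 * invocation above emits 256 entries; the remaining 256 slots of
	 * this page stay zero.)
	 */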

NEXT_PAGE(level2_spare_pgt)
	.fill   512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)
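	/*
	 * (lgdt consumes a 10-byte descriptor: a 16-bit limit, here the
	 * GDT size in bytes minus one, followed by a 64-bit base.)
	 */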

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

#include "../../x86/xen/xen-head.S"

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip 256 * 16
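	/* (256 vectors, each a 16-byte long-mode IDT gate descriptor) */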

	.section .bss.page_aligned, "aw", @nobits
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE