/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fixup the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
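	/*
	 * %rbp now holds the load delta: the physical address the kernel
	 * was actually loaded at minus the physical address it was
	 * compiled to run at (zero when not relocated).
	 */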

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fixup the physical addresses in the page table */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)
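	/*
	 * These are the only three populated slots of init_level4_pgt
	 * (defined below): slot 0 is the identity mapping, slot 511 the
	 * kernel mapping at __START_KERNEL_map, and slot 258 presumably
	 * the direct mapping at PAGE_OFFSET in this tree.
	 */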

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an Identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete
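	/*
	 * _text sits at or above 1G, so the prebuilt level2_ident_pgt
	 * (which only covers the first 1G) does not reach it: hook the
	 * spare pmd page into the matching level3 slot and map the 2M
	 * page containing _text through it.
	 */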

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up, along with the mappings
	 * beyond _end.
	 */

	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
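	/* %r8 = end of the 512-entry (4096 byte) pmd page */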
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next entry */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_X86_TRAMPOLINE
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better to take a jmp than to rely on the empty space
	 * being filled with 0x90 (nop).
	 */
	jmp secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
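	/*
	 * $1f is the link-time (kernel virtual) address of the label
	 * below, so the indirect jump moves us off the identity-mapped
	 * alias; a plain rip-relative jump would have stayed on it.
	 */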
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi
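	/* %edi now holds the CPUID 0x80000001 EDX feature bits; bit 20 is NX */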

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq stack_start(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* Set up data segments; actually 0 would do too */
	movl $__KERNEL_DS,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/*
	 * Set up a dummy PDA. This is just for some early bootup code
	 * that does in_interrupt()
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
	movq    %rax,%rdx
	shrq	$32,%rdx
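	/* wrmsr takes the 64bit base split across %edx (high) and %eax (low) */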
	wrmsr

	/* %esi is a pointer to the real mode structure with interesting
	   info; pass it to C */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set, so
	 * we make this a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
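	/* lretq pops %rip first and %cs second, consuming the frame built
	   above; the fake return address stays on the stack for the
	   unwinder once initial_code starts running */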

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	__FINITDATA

	ENTRY(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0

bad_address:
	jmp bad_address

	.section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
	.globl early_idt_handlers
early_idt_handlers:
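	# Build one small stub per exception vector: each loads its
	# vector number into %esi and jumps to the common handler.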
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl $i, %esi
	jmp early_idt_handler
	i = i + 1
	.endr
#endif

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq %rcx,%r9
	xorl %r8d,%r8d		# zero for error code
	movl %esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
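	# (0x27d00 sets bits 8, 10-14 and 17: #DF, #TS, #NP, #SS, #GP,
	#  #PF and #AC, the exceptions that push an error code)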
	cmpl $31,%ecx
	ja 0f
	movl $1,%eax
	salq %cl,%rax
	testl $0x27d00,%eax
	je 0f
	popq %r8		# get error code
0:	movq 0(%rsp),%rcx	# get ip
	movq 8(%rsp),%rdx	# get cs
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 8(%rsp),%rsi	# get rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long 0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
	.previous

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
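/*
 * For example, PMDS(0, __PAGE_KERNEL_LARGE_EXEC, 3) would emit three
 * 2M entries mapping physical 0, 2M and 4M with those permission bits
 * (PMD_SHIFT is 21, so each step of i advances by one large page).
 */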

	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
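	/* slot 258 covers virtual 0xffff810000000000 (258 << 39,
	   sign-extended); presumably the direct mapping of physical
	   memory at PAGE_OFFSET in this tree */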
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
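	/* given the 512 MB mapping noted above and 2M pmd entries, this
	   expands to 256 of the 512 slots; the rest of the page is zero */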

NEXT_PAGE(level2_spare_pgt)
	.fill   512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
	.quad   per_cpu__gdt_page

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000


	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip 256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE
413