/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
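/*
 * For reference, a sketch of the arithmetic (assuming the usual
 * __START_KERNEL_map of 0xffffffff80000000): pgd_index() picks bits 47..39
 * and pud_index() bits 38..30 of the address, so L4_START_KERNEL = 511 and
 * L3_START_KERNEL = 510, matching the slots populated in init_level4_pgt
 * and level3_kernel_pgt below.
 */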

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode (CS.L = 1, CS.D = 0),
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
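	/*
	 * A worked example (assuming the default CONFIG_PHYSICAL_START of
	 * 0x1000000): the %rip-relative leaq gives the physical address we
	 * are actually running at, while $_text - __START_KERNEL_map is the
	 * physical address we were compiled to run at.  If the bootloader
	 * placed us at 0x1a000000 instead, %rbp now holds the 0x19000000
	 * delta that is added to the boot page tables below.
	 */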

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fix up the physical addresses in the page table. */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
	addq	%rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
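	/*
	 * These are exactly the slots that hold physical pointers to other
	 * boot page tables (compare the definitions of init_level4_pgt,
	 * level3_*_pgt and level2_fixmap_pgt at the end of this file); the
	 * kernel text+data pmds in level2_kernel_pgt are fixed up by the
	 * loop further below.
	 */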

	/* Add an identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
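	/*
	 * Hypothetical example (not taken from the source): a kernel loaded
	 * at physical 0x48000000 has PUD index 1 and PMD index 64, so the
	 * code above points level3_ident_pgt[1] at level2_spare_pgt and
	 * writes a 2M executable identity entry for 0x48000000 into
	 * level2_spare_pgt[64].
	 */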
ident_complete:

	/*
	 * Fix up the kernel text+data virtual addresses.  Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */

	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b
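	/*
	 * Roughly equivalent C, for illustration only (not part of this
	 * file; "load_delta" stands for the value kept in %rbp):
	 *
	 *	for (pmd = level2_kernel_pgt; pmd < level2_kernel_pgt + 512; pmd++)
	 *		if (*pmd & 1)			// _PAGE_PRESENT
	 *			*pmd += load_delta;
	 */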

	/* Fix up phys_base */
	addq	%rbp, phys_base(%rip)

	/*
	 * Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros.  Better to take a jmp than to rely on the empty space
	 * being filled with 0x90 (nop).
	 */
	jmp secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode (CS.L = 1, CS.D = 0),
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
	movq	%rax, %cr4

	/* Set up the early boot 4-level page tables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3
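	/*
	 * init_level4_pgt - __START_KERNEL_map is the physical address the
	 * table was compiled for; adding phys_base (fixed up in startup_64)
	 * yields the physical address it actually sits at, which is what
	 * %cr3 needs.
	 */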

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:
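	/*
	 * $1f is the link-time (kernel-map) virtual address of the label, so
	 * when we entered via startup_64 on identity-mapped addresses, the
	 * indirect jump above is the point where execution switches to the
	 * __START_KERNEL_map mapping.
	 */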

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq stack_start(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running.  We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section until the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
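	/*
	 * wrmsr writes %edx:%eax to the MSR selected by %ecx, so the two
	 * 32-bit loads above hand the full 64-bit initial_gs value to
	 * MSR_GS_BASE.
	 */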

	/* %esi is a pointer to the real mode structure with interesting info.
	   Pass it to C. */
	movl	%esi, %edi

	/*
	 * Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 */
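	/*
	 * The three pushes below build the frame lretq expects: the target
	 * %rip on top of the stack, the new %cs below it, and a 0 underneath
	 * as a fake return address for the unwinder.  lretq pops %rip and
	 * %cs and lands in whatever initial_code points at
	 * (x86_64_start_kernel at boot).
	 */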
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	ENTRY(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	ENTRY(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0
	__FINITDATA

bad_address:
	jmp bad_address

	.section ".init.text","ax"
	.globl early_idt_handlers
early_idt_handlers:
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
	ASM_NOP2
	.else
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler
	i = i + 1
	.endr
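	/*
	 * Vectors that push a hardware error code (those set in
	 * EXCEPTION_ERRCODE_MASK) get a 2-byte nop instead of the 2-byte
	 * "pushq $0", so every stub has the same length and the early IDT
	 * setup code can locate stub i at a fixed stride.
	 */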

ENTRY(early_idt_handler)
	cld

	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 10f

	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

10:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 40(%rsp),%rsi	# %rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	addq $16,%rsp		# drop vector number and error code
	decl early_recursion_flag(%rip)
	INTERRUPT_RETURN

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
	.previous

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1-to-1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
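/*
 * For example (illustration only, not an expansion used verbatim below),
 * PMDS(0, __PAGE_KERNEL_LARGE_EXEC, 3) emits three 2M pmd entries:
 *	.quad 0x000000 + __PAGE_KERNEL_LARGE_EXEC
 *	.quad 0x200000 + __PAGE_KERNEL_LARGE_EXEC
 *	.quad 0x400000 + __PAGE_KERNEL_LARGE_EXEC
 */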

	.data
	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000 (always using
	 * 2Mbyte large pages provided by PAE mode).
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
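	/* PTRS_PER_PMD = 512 entries of 2M each, i.e. exactly the first 1G. */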

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
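	/*
	 * With the 512 MB KERNEL_IMAGE_SIZE described above and 2 MB
	 * PMD_SIZE entries, this expands to 256 pmds; the rest of the page
	 * stays zero.
	 */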

NEXT_PAGE(level2_spare_pgt)
	.fill   512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

#include "../../x86/xen/xen-head.S"

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip IDT_ENTRIES * 16

	.align L1_CACHE_BYTES
ENTRY(nmi_idt_table)
	.skip IDT_ENTRIES * 16

	__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE