/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
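	/*
	 * At this point paging still maps us 1:1, so the leaq yields our
	 * physical load address, while $_text - __START_KERNEL_map is the
	 * physical address we were compiled for.  For example, if we were
	 * compiled for phys 0x200000 but loaded at 0x1200000, %rbp ends up
	 * as 0x1000000, the delta applied to the page tables below.
	 */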

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address
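	/*
	 * The early mappings all hang off a single pgd entry, so a load
	 * address at or above PGDIR_SIZE (512G with 4-level paging) cannot
	 * be identity mapped here.
	 */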

	/* Fixup the physical addresses in the page table */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
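	/*
	 * pgd slot 0 holds the identity mapping and slot 511 the kernel
	 * text mapping at __START_KERNEL_map (-2G).  Slot 258 should be
	 * the direct mapping of physical memory at PAGE_OFFSET
	 * (0xffff810000000000 here: 0x810000000000 >> 39 == 258).
	 */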

	/* Add an identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

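	/*
	 * The prebuilt level2_ident_pgt hangs off pud slot 0 and thus only
	 * covers the first 1G.  A non-zero pud index means we are loaded
	 * above 1G: point that pud slot at level2_spare_pgt and install a
	 * single 2M executable mapping for the kernel text there.
	 */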
	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up, along with the mappings
	 * beyond _end.
	 */

	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b
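	/*
	 * The loop above walks all 512 pmd entries of level2_kernel_pgt
	 * (4096 bytes, 8 per entry) and relocates only those with the
	 * present bit (bit 0) set, skipping the holes.
	 */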

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_SMP
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif
#ifdef CONFIG_ACPI_SLEEP
	addq	%rbp, wakeup_level4_pgt + 0(%rip)
	addq	%rbp, wakeup_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better to take a jmp than to rely on the empty space
	 * being filled with 0x90 (nop).
	 */
	jmp secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	xorq	%rax, %rax
	btsq	$5, %rax
	btsq	$7, %rax
	movq	%rax, %cr4
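	/* CR4 bit 5 is CR4.PAE, bit 7 is CR4.PGE (global pages). */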

	/* Set up the 4-level pagetables for the early boot stage. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:
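	/*
	 * movq $1f picks up the absolute link-time (virtual) address of
	 * the label, so the indirect jmp lands in the high kernel mapping;
	 * a relative jump would have stayed at the current addresses.
	 */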

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */
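	/*
	 * Bit 20 of CPUID 0x80000001 EDX is the NX feature flag; EFER.NX
	 * is only set when the CPU reports it, since setting a reserved
	 * EFER bit would fault.
	 */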

	/* Setup cr0 */
#define CR0_PM				1		/* protected mode */
#define CR0_MP				(1<<1)
#define CR0_ET				(1<<4)
#define CR0_NE				(1<<5)
#define CR0_WP				(1<<16)
#define CR0_AM				(1<<18)
#define CR0_PAGING			(1<<31)
	movl $CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING,%eax
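	/* Resulting value: 0x80050033 (PG|AM|WP|NE|ET|MP|PE). */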
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq init_rsp(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr(%rip)

	/* set up data segments. actually 0 would do too */
	movl $__KERNEL_DS,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/*
	 * Set up a dummy PDA. This is just for some early bootup code
	 * that does in_interrupt().
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
	movq	%rax,%rdx
	shrq	$32,%rdx
	wrmsr
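	/* wrmsr takes the 64bit GS base split as %edx:%eax, hence the shrq. */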

	/* %esi is a pointer to the real mode structure with interesting
	   info; pass it to C. */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set, so we
	 * make this a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
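	/*
	 * lretq pops the new %rip (the pushed %rax) and then the new %cs
	 * (__KERNEL_CS), acting as a far jump into initial_code; the
	 * pushed $0 stays behind as the fake return address.
	 */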

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	__FINITDATA

	ENTRY(init_rsp)
	.quad  init_thread_union+THREAD_SIZE-8

bad_address:
	jmp bad_address

#ifdef CONFIG_EARLY_PRINTK
	.globl early_idt_handlers
early_idt_handlers:
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl $i, %esi
	jmp early_idt_handler
	i = i + 1
	.endr
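	/*
	 * The .rept block stamps out one small stub per exception vector,
	 * each loading its vector number into %esi before sharing the
	 * common early_idt_handler below.
	 */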
#endif

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq %rcx,%r9
	xorl %r8d,%r8d		# zero for error code
	movl %esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl $31,%ecx
	ja 0f
	movl $1,%eax
	salq %cl,%rax
	testl $0x27d00,%eax
	je 0f
	popq %r8		# get error code
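	# 0x27d00 = bits 8,10,11,12,13,14,17: #DF, #TS, #NP, #SS, #GP,
	# #PF and #AC, the exception vectors that push an error code.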
0:	movq 0(%rsp),%rcx	# get ip
	movq 8(%rsp),%rdx	# get cs
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 8(%rsp),%rsi	# get rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long 0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)		\
	i = 0 ;					\
	.rept (COUNT) ;				\
	.quad	(START) + (i << 21) + (PERM) ;	\
	i = i + 1 ;				\
	.endr
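/* i << 21 steps in 2M increments: e.g. PMDS(0, PERM, 3) expands to the
 * three large-page entries 0+PERM, 0x200000+PERM and 0x400000+PERM,
 * mapping the first 6M.
 */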

	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000 (always using
	 * 2Mbyte large pages provided by PAE mode).
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
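	/* Slot check: 1 + 257 + 1 + 252 + 1 = 512 pgd entries, with the
	   three populated slots at indices 0, 258 and 511. */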

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
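	/* KERNEL_IMAGE_SIZE/PMD_SIZE = 512M / 2M = 256 pmd entries. */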

NEXT_PAGE(level2_spare_pgt)
	.fill   512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif
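	/*
	 * This is the 10-byte operand lgdt expects: a 16bit limit
	 * (size - 1) followed by a 64bit base; the .rept reserves one
	 * such slot per additional CPU.
	 */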

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

/* We need valid kernel segments for data and code in long mode too.
 * IRET will check the segment types.  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout.
 */

	.section .data.page_aligned, "aw"
	.align PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */

ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS  */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad   0,0,0			/* three TLS descriptors */
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
gdt_end:
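	/*
	 * Descriptor encoding, e.g. __KERNEL_CS = 0x00af9b000000ffff:
	 * base 0, limit 0xfffff, access 0x9b (present, DPL 0, code,
	 * exec/read, accessed), flags 0xa (G=1, L=1, D=0), i.e. a 64bit
	 * code segment.  __KERNEL32_CS differs only in flags 0xc
	 * (G=1, L=0, D=1), i.e. 32bit.
	 */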
	/* asm/segment.h:GDT_ENTRIES must match this */
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip 256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE
457