xref: /openbmc/linux/arch/x86/kernel/head_64.S (revision ef7f0d6a)
/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode, with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/*
	 * Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
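	/*
	 * Illustrative (not guaranteed) numbers: with a link-time address
	 * of __START_KERNEL_map + 0x1000000 for _text, a kernel loaded at
	 * physical 0x8000000 leaves %rbp = 0x8000000 - 0x1000000 =
	 * 0x7000000, the physical load delta that gets added to every page
	 * table entry fixed up below.  A kernel loaded exactly where it was
	 * compiled to run yields %rbp = 0 and the fixups become no-ops.
	 */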

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
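	/*
	 * The slots touched above mirror the static layout further down in
	 * this file: level3_kernel_pgt keeps its kernel and fixmap pointers
	 * in entries 510 and 511, and level2_fixmap_pgt points to
	 * level1_fixmap_pgt from entry 506.  Only entries holding physical
	 * pointers need the delta; empty slots stay zero.
	 */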
97250c2277SThomas Gleixner
988170e6beSH. Peter Anvin	/*
998170e6beSH. Peter Anvin	 * Set up the identity mapping for the switchover.  These
1008170e6beSH. Peter Anvin	 * entries should *NOT* have the global bit set!  This also
1018170e6beSH. Peter Anvin	 * creates a bunch of nonsense entries but that is fine --
1028170e6beSH. Peter Anvin	 * it avoids problems around wraparound.
1038170e6beSH. Peter Anvin	 */
104250c2277SThomas Gleixner	leaq	_text(%rip), %rdi
1058170e6beSH. Peter Anvin	leaq	early_level4_pgt(%rip), %rbx
106250c2277SThomas Gleixner
107250c2277SThomas Gleixner	movq	%rdi, %rax
1088170e6beSH. Peter Anvin	shrq	$PGDIR_SHIFT, %rax
1098170e6beSH. Peter Anvin
1108170e6beSH. Peter Anvin	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
1118170e6beSH. Peter Anvin	movq	%rdx, 0(%rbx,%rax,8)
1128170e6beSH. Peter Anvin	movq	%rdx, 8(%rbx,%rax,8)
1138170e6beSH. Peter Anvin
1148170e6beSH. Peter Anvin	addq	$4096, %rdx
1158170e6beSH. Peter Anvin	movq	%rdi, %rax
116250c2277SThomas Gleixner	shrq	$PUD_SHIFT, %rax
1178170e6beSH. Peter Anvin	andl	$(PTRS_PER_PUD-1), %eax
118e9d0626eSZhang Yanfei	movq	%rdx, 4096(%rbx,%rax,8)
119e9d0626eSZhang Yanfei	incl	%eax
120e9d0626eSZhang Yanfei	andl	$(PTRS_PER_PUD-1), %eax
121e9d0626eSZhang Yanfei	movq	%rdx, 4096(%rbx,%rax,8)
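	/*
	 * Two adjacent PGD slots and two adjacent PUD slots are pointed at
	 * the same next-level table, so the identity mapping stays intact
	 * even if the kernel image straddles a 512G or 1G boundary.  The
	 * second PUD index wraps through the andl above; the wrapped entry
	 * is one of the "nonsense" entries the comment above allows for.
	 */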

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b
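	/*
	 * %ecx is the inclusive count of 2M pages spanning _text.._end-1.
	 * As a rough example, a 20M kernel image starting on a 2M boundary
	 * takes ten iterations, each writing one 2M PSE entry for physical
	 * chunk i at PMD index (_text >> PMD_SHIFT) + i, modulo
	 * PTRS_PER_PMD.
	 */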

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp 1f
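	/*
	 * The boot CPU falls through into secondary_startup_64 at label 1
	 * below, with %rax holding early_level4_pgt's address relative to
	 * __START_KERNEL_map (phys_base is added there), skipping only the
	 * init_level4_pgt load that secondary CPUs use.
	 */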
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode, with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Setup early boot stage 4 level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:
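	/*
	 * The movq/jmp pair above is the hop from the identity mapping to
	 * the kernel mapping: $1f is the label's link-time (high) virtual
	 * address, so the indirect jump lands on the same instruction
	 * stream through the __START_KERNEL_map translation just loaded
	 * into %cr3.
	 */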

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */
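	/*
	 * Bit 20 of CPUID leaf 0x80000001's %edx output is the NX feature
	 * flag, which is why the btl above tests that literal bit of the
	 * saved %edi.  When NX is present it is enabled in EFER and also
	 * set in early_pmd_flags, so mappings built later by the early
	 * page-fault path pick up the no-execute bit.
	 */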

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs
	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * the init data section until the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
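	/*
	 * wrmsr takes the 64bit value in %edx:%eax, so the two 32bit loads
	 * above split the quadword stored at initial_gs into its low
	 * (%eax) and high (%edx) halves before writing MSR_GS_BASE.
	 */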

	/* %rsi is a pointer to the real mode structure with interesting
	   info; pass it to C */
	movq	%rsi, %rdi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
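	/*
	 * Stack consumed by the lretq above, top down: the target %rip
	 * (initial_code), then __KERNEL_CS for the new %cs.  The zero
	 * pushed first is left behind as the bogus return address the
	 * called C function would "return" to, which is what stops
	 * unwinders.
	 */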

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except the stack. We just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	GLOBAL(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
	.globl early_idt_handlers
early_idt_handlers:
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
	ASM_NOP2
	.else
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler
	i = i + 1
	.endr
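	/*
	 * Each generated stub is meant to be the same size: 2 bytes of
	 * nops or a 2 byte push of the dummy error code, a 2 byte push of
	 * the vector number, and a 5 byte jmp -- 9 bytes in all -- so C
	 * code can index the i-th stub at early_idt_handlers + i*9.  That
	 * only holds if gas does not shrink the jmp; see the note below.
	 */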

/* This is global to keep gas from relaxing the jumps */
ENTRY(early_idt_handler)
	cld

	cmpl $2,(%rsp)		# X86_TRAP_NMI
	je is_nmi		# Ignore NMI

	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 11f

	cmpl $14,72(%rsp)	# Page fault?
	jnz 10f
	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			# All good

10:
	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

11:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 88(%rsp),%rsi	# %rip again (88(%rsp) holds %rip; 40(%rsp) is the saved %rsi)
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found or page table generated
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	decl early_recursion_flag(%rip)
is_nmi:
	addq $16,%rsp		# drop vector number and error code
	INTERRUPT_RETURN
ENDPROC(early_idt_handler)

	__INITDATA

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
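/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 3) expands to
 * three quadwords:
 *	.quad 0 + (0 << PMD_SHIFT) + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0 + (1 << PMD_SHIFT) + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0 + (2 << PMD_SHIFT) + __PAGE_KERNEL_IDENT_LARGE_EXEC
 * i.e. three consecutive 2M large-page entries starting at physical 0.
 */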

	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
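	/*
	 * early_dynamic_pgts reserves EARLY_DYNAMIC_PAGE_TABLES zeroed
	 * pages; the early page fault path (early_make_pgtable, reached
	 * from early_idt_handler above) hands them out when the kernel
	 * touches addresses the static tables here do not yet map.
	 */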

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)
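	/*
	 * This is the 10-byte descriptor operand lgdt expects: a 16bit
	 * limit (size of the GDT in bytes, minus one) followed by the
	 * 64bit linear base address, here the per-cpu gdt_page.
	 */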

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

#ifdef CONFIG_KASAN
#define FILL(VAL, COUNT)				\
	.rept (COUNT) ;					\
	.quad	(VAL) ;					\
	.endr

NEXT_PAGE(kasan_zero_pte)
	FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
NEXT_PAGE(kasan_zero_pmd)
	FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
NEXT_PAGE(kasan_zero_pud)
	FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
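/*
 * Each of these tables has all 512 entries pointing at the single
 * next-level table, so one pud -> pmd -> pte chain ending in
 * kasan_zero_page can back an arbitrarily large stretch of shadow
 * address space with just these four pages.
 */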

#undef FILL
#endif


#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE

#ifdef CONFIG_KASAN
/*
 * This page is used as the early shadow. We don't use empty_zero_page
 * at early stages because stack instrumentation could write some
 * garbage to this page.
 * Later we reuse it as the zero shadow for large ranges of memory
 * that are allowed to be accessed but are not instrumented by kasan
 * (vmalloc/vmemmap ...).
 */
NEXT_PAGE(kasan_zero_page)
	.skip PAGE_SIZE
#endif