/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
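/*
 * With the 4-level paging layout used here (9 index bits per level,
 * PGDIR_SHIFT = 39, PUD_SHIFT = 30), and assuming the default,
 * non-randomized __PAGE_OFFSET_BASE of 0xffff880000000000, these
 * work out to:
 *
 *	L4_PAGE_OFFSET  = (0xffff880000000000 >> 39) & 511 = 272
 *	L4_START_KERNEL = (0xffffffff80000000 >> 39) & 511 = 511
 *	L3_START_KERNEL = (0xffffffff80000000 >> 30) & 511 = 510
 *
 * i.e. the kernel text mapping lives in the last PGD slot and the
 * second-to-last PUD slot, matching the 510/511 entries fixed up and
 * defined below.
 */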

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fixup the physical addresses in our
	 * page tables and then reload them.
	 */

	/*
	 * Setup stack for verify_cpu(). "-8" because initial_stack is defined
	 * this way, see below. Our best guess is a NULL ptr for stack
	 * termination heuristics and we don't want to break anything which
	 * might depend on it (kgdb, ...).
	 */
	leaq	(__end_init_task - 8)(%rip), %rsp

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
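	/*
	 * %rbp now holds the load delta: the RIP-relative leaq yields the
	 * physical address _text was actually loaded at (we still run on
	 * the identity mapping), and $_text - __START_KERNEL_map is the
	 * physical address it was linked to run at.  For example
	 * (illustrative numbers only): linked to run at physical 0x1000000
	 * but loaded at 0x5000000 gives %rbp = 0x4000000, which is added
	 * to every physical address stored in the initial page tables
	 * below.
	 */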

	/* Is the address not 2M aligned? */
	testl	$~PMD_PAGE_MASK, %ebp
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
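	/*
	 * The entries touched above are, per the page table definitions at
	 * the end of this file: early_level4_pgt[511] -> level3_kernel_pgt,
	 * level3_kernel_pgt[510] -> level2_kernel_pgt,
	 * level3_kernel_pgt[511] -> level2_fixmap_pgt, and
	 * level2_fixmap_pgt[506] -> level1_fixmap_pgt.  Each stores a
	 * physical address, hence the %rbp adjustment.
	 */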
107250c2277SThomas Gleixner
1088170e6beSH. Peter Anvin	/*
1098170e6beSH. Peter Anvin	 * Set up the identity mapping for the switchover.  These
1108170e6beSH. Peter Anvin	 * entries should *NOT* have the global bit set!  This also
1118170e6beSH. Peter Anvin	 * creates a bunch of nonsense entries but that is fine --
1128170e6beSH. Peter Anvin	 * it avoids problems around wraparound.
1138170e6beSH. Peter Anvin	 */
114250c2277SThomas Gleixner	leaq	_text(%rip), %rdi
1158170e6beSH. Peter Anvin	leaq	early_level4_pgt(%rip), %rbx
116250c2277SThomas Gleixner
117250c2277SThomas Gleixner	movq	%rdi, %rax
1188170e6beSH. Peter Anvin	shrq	$PGDIR_SHIFT, %rax
1198170e6beSH. Peter Anvin
1208170e6beSH. Peter Anvin	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
1218170e6beSH. Peter Anvin	movq	%rdx, 0(%rbx,%rax,8)
1228170e6beSH. Peter Anvin	movq	%rdx, 8(%rbx,%rax,8)
1238170e6beSH. Peter Anvin
1248170e6beSH. Peter Anvin	addq	$4096, %rdx
1258170e6beSH. Peter Anvin	movq	%rdi, %rax
126250c2277SThomas Gleixner	shrq	$PUD_SHIFT, %rax
1278170e6beSH. Peter Anvin	andl	$(PTRS_PER_PUD-1), %eax
128e9d0626eSZhang Yanfei	movq	%rdx, 4096(%rbx,%rax,8)
129e9d0626eSZhang Yanfei	incl	%eax
130e9d0626eSZhang Yanfei	andl	$(PTRS_PER_PUD-1), %eax
131e9d0626eSZhang Yanfei	movq	%rdx, 4096(%rbx,%rax,8)
132250c2277SThomas Gleixner
1338170e6beSH. Peter Anvin	addq	$8192, %rbx
134250c2277SThomas Gleixner	movq	%rdi, %rax
1358170e6beSH. Peter Anvin	shrq	$PMD_SHIFT, %rdi
1368170e6beSH. Peter Anvin	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
1378170e6beSH. Peter Anvin	leaq	(_end - 1)(%rip), %rcx
1388170e6beSH. Peter Anvin	shrq	$PMD_SHIFT, %rcx
1398170e6beSH. Peter Anvin	subq	%rdi, %rcx
1408170e6beSH. Peter Anvin	incl	%ecx
1418170e6beSH. Peter Anvin
1428170e6beSH. Peter Anvin1:
1438170e6beSH. Peter Anvin	andq	$(PTRS_PER_PMD - 1), %rdi
1448170e6beSH. Peter Anvin	movq	%rax, (%rbx,%rdi,8)
1458170e6beSH. Peter Anvin	incq	%rdi
1468170e6beSH. Peter Anvin	addq	$PMD_SIZE, %rax
1478170e6beSH. Peter Anvin	decl	%ecx
1488170e6beSH. Peter Anvin	jnz	1b
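	/*
	 * A rough C sketch of the block above (illustrative only, assuming
	 * text_phys/end_phys are the physical load addresses of _text/_end,
	 * text_phys is 2M aligned as checked earlier, and the PUD/PMD pages
	 * are the first two spare pages of early_dynamic_pgts, which
	 * immediately follows early_level4_pgt):
	 *
	 *	u64 *pgd = early_level4_pgt;
	 *	u64 *pud = pgd + 512, *pmd = pgd + 1024;
	 *	u64 i = text_phys >> PGDIR_SHIFT;
	 *	pgd[i] = pgd[i + 1] = (u64)pud + _KERNPG_TABLE;
	 *	i = text_phys >> PUD_SHIFT;
	 *	pud[i % 512] = pud[(i + 1) % 512] = (u64)pmd + _KERNPG_TABLE;
	 *	for (i = text_phys >> PMD_SHIFT;
	 *	     i <= (end_phys - 1) >> PMD_SHIFT; i++)
	 *		pmd[i % 512] = (i << PMD_SHIFT) +
	 *			       (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL);
	 *
	 * The doubled "+ 1" entries are the deliberate nonsense mappings
	 * that avoid wraparound trouble when _text sits near an index
	 * boundary.
	 */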

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b
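	/*
	 * Equivalent C sketch of the loop above, assuming load_delta is
	 * the value computed into %rbp earlier (the testb $1 checks bit 0,
	 * _PAGE_PRESENT):
	 *
	 *	u64 *p;
	 *	for (p = level2_kernel_pgt; p < level2_kernel_pgt + 512; p++)
	 *		if (*p & 1)
	 *			*p += load_delta;
	 */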

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Setup early boot stage 4 level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * the init data section until the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
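	/*
	 * wrmsr writes %edx:%eax (high:low 32 bits) to the MSR selected
	 * by %ecx, so the two 32-bit loads above hand the full 64-bit
	 * initial_gs value to MSR_GS_BASE.
	 */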

	/* %rsi holds a pointer to the real mode structure with interesting
	   info; pass it to C */
	movq	%rsi, %rdi
	jmp	start_cpu
ENDPROC(secondary_startup_64)

ENTRY(start_cpu)
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
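	/*
	 * The three pushes below build this frame (top of stack first):
	 *
	 *	(%rsp)   initial_code	new %rip, popped by lretq
	 *	8(%rsp)  __KERNEL_CS	new %cs,  popped by lretq
	 *	16(%rsp) 0		fake return address for the unwinder,
	 *				left on the stack
	 */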
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except the stack. We just set up the stack here, then call
 * start_secondary() via start_cpu().
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip), %rsp
	jmp	start_cpu
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	.quad  init_thread_union+THREAD_SIZE-8
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
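/*
 * Illustrative expansion (not literal assembler output): for a vector
 * without a CPU-pushed error code, such as #DE (vector 0), the .rept
 * body above emits
 *
 *	pushq $0			# dummy error code
 *	pushq $0			# vector number
 *	jmp early_idt_handler_common
 *
 * while a vector with a CPU-pushed error code, such as #GP (vector 13),
 * skips the dummy push.  Each stub is then padded with 0xcc (int3) up to
 * EARLY_IDT_HANDLER_SIZE so IDT entries can be computed by index.
 */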

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_iret
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
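/*
 * For example (illustrative), PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 3)
 * expands to:
 *
 *	.quad	0x000000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad	0x200000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad	0x400000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *
 * i.e. three consecutive 2MB large-page entries starting at physical
 * address 0.
 */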

	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)