/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>

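/*
 * GET_CR2_INTO(reg) reads %cr2 (the faulting address) into reg.  Under
 * CONFIG_PARAVIRT %cr2 cannot be read with a plain mov; GET_CR2_INTO_RAX
 * returns the value in %rax, so it is copied into the requested register
 * afterwards.
 */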
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
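/*
 * These are the PGD/PUD slot indices of the direct mapping and of the
 * kernel text mapping.  They are used below (e.g. ".org init_level4_pgt +
 * L4_START_KERNEL*8") to place the initial page table entries at the
 * correct offsets within their pages.
 */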

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	/* Sanitize CPU configuration */
	call verify_cpu

	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Setup early boot stage 4 level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
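	/*
	 * PE/PG enable protected mode and paging, WP makes the kernel honor
	 * read-only pages on supervisor accesses, NE selects native FPU
	 * error reporting, and MP/ET/AM are the conventional remaining
	 * setup bits.
	 */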
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section until the per cpu areas are set up.
	 */
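	/* wrmsr takes the 64-bit base split as %edx:%eax (high:low) */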
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/*
	 * %rsi is a pointer to the real mode structure with interesting
	 * info; pass it to C.
	 */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to a far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with a 64bit offset.
	 * The AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support a 64bit offset.
	 * The Software Developer's Manual, Vol 2, states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorq	%rbp, %rbp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
ENDPROC(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary() via .Ljump_to_C_code.
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	/*
	 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
	 * unwinder reliably detect the end of the stack.
	 */
	.quad  init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
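	/*
	 * Each stub below is padded to EARLY_IDT_HANDLER_SIZE bytes so that
	 * the stub for vector i can be found at
	 * early_idt_handler_array + i * EARLY_IDT_HANDLER_SIZE.
	 */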
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_iret
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
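/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) below
 * emits 512 PMD entries mapping the first 1G of physical memory with
 * 2MB pages.
 */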

	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0
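	/* 506 + 1 + 5 = 512 entries, i.e. one full PMD page */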

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)