/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
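
/*
 * A worked example of the index arithmetic above: __START_KERNEL_map is
 * 0xffffffff80000000, so
 *
 *	l4_index(__START_KERNEL_map)  = (0xffffffff80000000 >> 39) & 511 = 511
 *	pud_index(__START_KERNEL_map) = (0xffffffff80000000 >> 30) & 511 = 510
 *
 * i.e. the kernel text mapping occupies the last PGD slot and the
 * second-to-last PUD slot, matching the "= 511" and "= 510" comments next
 * to the page table entries below.
 */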

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi
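	/*
	 * Note: %rsi still holds the physical pointer to real_mode_data; it
	 * was saved and restored around the call above because %rsi is a
	 * caller-saved register in the C calling convention.
	 */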

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
SYM_CODE_END(startup_64)

ENTRY(secondary_startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY
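
	/*
	 * Execution now continues in the kernel's high virtual mapping: the
	 * "movq $1f" above loaded the link-time (virtual) address of this
	 * label, so the indirect jump guarantees %rip is no longer in an
	 * identity-mapped range.
	 */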

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq
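	/*
	 * "pushq $0; popfq" clears every flag bit; in particular it leaves
	 * the direction flag cleared (as the C ABI expects) and keeps
	 * interrupts disabled (IF = 0) until the kernel is ready for them.
	 */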

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel will no longer have access to the userspace
	 * addresses we are currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-CPU areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* %rsi is a pointer to the real mode structure with interesting
	   info; pass it to C. */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer's Manual Vol 2 states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
END(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except the stack. We just set up the stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
ENTRY(start_cpu0)
	UNWIND_HINT_EMPTY
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
END(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))

/*
 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS)
	__FINITDATA

	__INIT
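
/*
 * Each of the NUM_EXCEPTION_VECTORS stubs below is EARLY_IDT_HANDLER_SIZE
 * bytes long (the .fill pads each one out to that size). A stub pushes a
 * dummy error code for the vectors where the hardware does not supply one,
 * so the stack layout is uniform, then pushes its vector number and jumps
 * to early_idt_handler_common.
 */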
ENTRY(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
	UNWIND_HINT_IRET_REGS offset=16
END(early_idt_handler_array)

SYM_CODE_START_LOCAL(early_idt_handler_common)
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

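	/*
	 * At this point a struct pt_regs has been built on the stack. Early
	 * page faults get one chance to be fixed up by building the missing
	 * early page table entry on demand; every other vector (or a failed
	 * fixup) goes to the generic early exception handler.
	 */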
	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* can clobber %rax if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)


#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
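
/*
 * For illustration, PMDS(0, __PAGE_KERNEL_LARGE_EXEC, 3) expands to three
 * consecutive 2MB PMD entries:
 *
 *	.quad 0 + (0 << PMD_SHIFT) + __PAGE_KERNEL_LARGE_EXEC
 *	.quad 0 + (1 << PMD_SHIFT) + __PAGE_KERNEL_LARGE_EXEC
 *	.quad 0 + (2 << PMD_SHIFT) + __PAGE_KERNEL_LARGE_EXEC
 *
 * i.e. large-page mappings of the first 3 * 2MB of physical memory.
 */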

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or it is enabled.  But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: the module area starts at +512MB, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 *
	 *  This table is eventually used by the kernel during normal
	 *  runtime.  Care must be taken to clear out undesired bits
	 *  later, like _PAGE_RW or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)
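
/*
 * phys_base holds the delta between the physical address the kernel was
 * linked to run at and the address it was actually loaded at; it is
 * adjusted during early boot by __startup_64() and added to the page table
 * address when CR3 is formed above ("addq phys_base(%rip), %rax").
 */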

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)