/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/smp.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
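
/*
 * For reference: with __START_KERNEL_map = 0xffffffff80000000,
 * l4_index() yields 511 and pud_index() yields 510 -- the same values
 * noted next to the static page table entries at the end of this file.
 */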

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_END_OF_STACK
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %RSI holds the physical address of the boot_params structure
	 * provided by the bootloader. Preserve it in %R15 so C function calls
	 * will not clobber it.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	mov	%rsi, %r15

	/* Set up the stack for verify_cpu() */
	leaq	(__end_init_task - PTREGS_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi

	/* Setup GSBASE to allow stack canary access for C code */
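	/*
	 * WRMSR takes the MSR number in ECX and the 64-bit value split
	 * across EDX:EAX (high:low), hence the mov/shr pair below.
	 */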
	movl	$MSR_GS_BASE, %ecx
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
	movl	%edx, %eax
	shrq	$32,  %rdx
	wrmsr

	call	startup_64_setup_env

	/* Now switch to __KERNEL_CS so IRET works reliably */
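	/*
	 * CS cannot be written with a plain MOV in long mode: push the new
	 * selector and a return address, then pop both with LRETQ.
	 */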
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_END_OF_STACK

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
	 * which needs to be done before any CPUID instructions are executed in
	 * subsequent code. Pass the boot_params pointer as the first argument.
	 */
	movq	%r15, %rdi
	call	sme_enable
#endif

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	movq	%r15, %rsi
	call	__startup_64

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
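	/*
	 * early_top_pgt - __START_KERNEL_map is the symbol's offset from
	 * the kernel mapping base; phys_base is added below (after label 1
	 * in secondary_startup_64) to turn it into a physical address.
	 */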
	jmp 1f
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/* Clear %R15, which holds the boot_params pointer on the boot CPU */
	xorq	%r15, %r15

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	movq	%cr4, %rcx
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif

	/* Enable PAE mode, PGE and LA57 */
	orl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and waiting
	 * for the next RET instruction.
	 */
	movq	%rax, %rdi
	call	sev_verify_cbit

	/*
	 * Switch to new page-table
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd and unmap the
	 * identity mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
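	/*
	 * Toggling CR4.PGE off and back on invalidates all TLB entries,
	 * including global ones, which a plain CR3 write leaves in place.
	 */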
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR // above

#ifdef CONFIG_SMP
	/*
	 * For parallel boot, the APIC ID is read from the APIC, and then
	 * used to look up the CPU number.  For booting a single CPU, the
	 * CPU number is encoded in smpboot_control.
	 *
	 * Bit 31	STARTUP_READ_APICID (Read APICID from APIC)
	 * Bit 0-23	CPU# if STARTUP_xx flags are not set
	 */
	movl	smpboot_control(%rip), %ecx
	testl	$STARTUP_READ_APICID, %ecx
	jnz	.Lread_apicid
	/*
	 * No control bit set, single CPU bringup. CPU number is provided
	 * in bit 0-23. This is also the boot CPU case (CPU number 0).
	 */
	andl	$(~STARTUP_PARALLEL_MASK), %ecx
	jmp	.Lsetup_cpu

.Lread_apicid:
	/* Check whether X2APIC mode is already enabled */
	mov	$MSR_IA32_APICBASE, %ecx
	rdmsr
	testl	$X2APIC_ENABLE, %eax
	jnz	.Lread_apicid_msr

	/* Read the APIC ID from the fix-mapped MMIO space. */
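	/* In xAPIC mode the 8-bit APIC ID sits in bits 31:24 of APIC_ID. */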
	movq	apic_mmio_base(%rip), %rcx
	addq	$APIC_ID, %rcx
	movl	(%rcx), %eax
	shr	$24, %eax
	jmp	.Llookup_AP

.Lread_apicid_msr:
	mov	$APIC_X2APIC_ID_MSR, %ecx
	rdmsr

.Llookup_AP:
	/* EAX contains the APIC ID of the current CPU */
	xorq	%rcx, %rcx
	leaq	cpuid_to_apicid(%rip), %rbx
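	/* cpuid_to_apicid[] holds one 32-bit APIC ID per CPU number. */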

.Lfind_cpunr:
	cmpl	(%rbx,%rcx,4), %eax
	jz	.Lsetup_cpu
	inc	%ecx
#ifdef CONFIG_FORCE_NR_CPUS
	cmpl	$NR_CPUS, %ecx
#else
	cmpl	nr_cpu_ids(%rip), %ecx
#endif
	jb	.Lfind_cpunr

	/*  APIC ID not found in the table. Drop the trampoline lock and bail. */
	movq	trampoline_lock(%rip), %rax
	movl	$0, (%rax)

1:	cli
	hlt
	jmp	1b

.Lsetup_cpu:
	/* Get the per-CPU offset for the given CPU#, which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */

	/*
	 * Setup a boot time stack - Any secondary CPU will have lost its stack
	 * by now because the CR3 switch above unmaps the real-mode stack.
	 *
	 * RDX contains the per-cpu offset
	 */
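	/* rsp = pcpu_hot.current_task->thread.sp, i.e. the idle task's stack. */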
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp

	/*
	 * Now that this CPU is running on its own stack, drop the realmode
	 * protection. For the boot CPU the pointer is NULL!
	 */
	movq	trampoline_lock(%rip), %rax
	testq	%rax, %rax
	jz	.Lsetup_gdt
	movl	$0, (%rax)

.Lsetup_gdt:
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
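	/*
	 * Build the 10-byte GDT descriptor (16-bit limit, 64-bit base) on
	 * the stack and load it; gdt_page(%rdx) is this CPU's GDT copy.
	 */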
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-CPU areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
#ifndef CONFIG_SMP
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
#endif
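	/* On SMP, RDX still holds the per-CPU offset loaded above. */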
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	/* Setup and Load IDT */
	call	early_setup_idt

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi
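	/* CPUID leaf 0x80000001: EDX bit 20 is the NX feature flag. */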

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl    %eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq
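	/* Note: loading 0 into RFLAGS also clears DF and IF. */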

	/* Pass the boot_params pointer as first argument */
	movq	%r15, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
/*
 * Entry point for soft restart of a CPU. Invoked from xxx_play_dead() for
 * restarting the boot CPU or for restarting SEV guest CPUs after CPU hot
 * unplug. Everything is set up already except the stack.
 */
SYM_CODE_START(soft_restart_cpu)
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(soft_restart_cpu)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq    %rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq    $8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

SYM_DATA(trampoline_lock, .quad 0);
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
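	/*
	 * Each stub below is padded (with 0xcc) to EARLY_IDT_HANDLER_SIZE
	 * bytes, so the IDT setup code can compute stub i's address as
	 * early_idt_handler_array + i * EARLY_IDT_HANDLER_SIZE.
	 */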
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call do_early_exception

	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq    %rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call    do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq    $8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif
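
/*
 * With PTI the user-space copy of a PGD lives in the page right after
 * the kernel one, so the entry code can switch between them by
 * toggling a single bit in CR3.
 */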

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
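
/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 * below emits 512 2MB-page PMD entries covering physical 0..1GiB:
 * entry i is simply (i << PMD_SHIFT) + PERM.
 */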

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
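	/*
	 * The .org directives below place each entry at byte offset
	 * index*8 within the table; the skipped slots stay zero, i.e.
	 * not present.
	 */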
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether the CPU
	 * supports it or whether it is enabled.  But the CPU should
	 * ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(smpboot_control,		.long 0)

	.align 16
/* This must match the first entry in level2_kernel_pgt */
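/*
 * phys_base starts out as 0 and is fixed up by __startup_64() to hold
 * the delta between the address the kernel was compiled to run at and
 * the address it was actually loaded at.
 */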
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)