/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
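
/*
 * Worked example (illustrative, assuming the usual 4-level layout with
 * __START_KERNEL_map = 0xffffffff80000000 and
 * __PAGE_OFFSET_BASE_L4 = 0xffff888000000000):
 *
 *   l4_index(0xffffffff80000000) = (0xffffffff80000000 >> 39) & 511 = 511
 *   l4_index(0xffff888000000000) = (0xffff888000000000 >> 39) & 511 = 273
 *
 * so L4_START_KERNEL is the last PGD slot (511) and L4_PAGE_OFFSET is 273.
 */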

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64-bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity-mapped page table
	 * for us.  These identity-mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64-bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu() */
	leaq	(__end_init_task - PTREGS_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi

	/*
	 * initial_gs points to the initial fixed_percpu_data struct with
	 * storage for the stack protector canary. Global pointer fixups are
	 * needed at this stage, so apply them as is done in fixup_pointer(),
	 * and initialize %gs such that the canary can be accessed at %gs:40
	 * for subsequent C calls.
	 */
	movl	$MSR_GS_BASE, %ecx
	movq	initial_gs(%rip), %rax
	movq	$_text, %rdx
	subq	%rdx, %rax
	addq	%rdi, %rax
	movq	%rax, %rdx
	shrq	$32,  %rdx
	wrmsr
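
/*
 * A minimal C sketch of the relocation performed above (illustrative,
 * mirroring what fixup_pointer() does for early C code):
 *
 *   static void *fixup_pointer(void *ptr, unsigned long physaddr)
 *   {
 *           return ptr - (void *)_text + (void *)physaddr;
 *   }
 *
 * initial_gs holds a link-time (virtual) address; subtracting the
 * link-time _text and adding the load address in %rdi yields the
 * address that is then programmed into MSR_GS_BASE via EDX:EAX.
 */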

	pushq	%rsi
	call	startup_64_setup_env
	popq	%rsi

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs
	 * to be done now, since this also includes setup of the SEV-SNP CPUID
	 * table, which needs to be done before any CPUID instructions are
	 * executed in subsequent code.
	 */
	movq	%rsi, %rdi
	pushq	%rsi
	call	sme_enable
	popq	%rsi
#endif

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_EMPTY
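
/*
 * Note: %cs cannot be written with a MOV. The far return above pops the
 * new RIP and then the new CS selector off the stack, which is the
 * conventional way to load __KERNEL_CS this early in boot.
 */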

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64-bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity-mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	movq	%cr4, %rcx
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif

	/* Enable PAE mode, PGE and LA57 */
	orl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
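
/*
 * After the addition above, %rax holds the final CR3 value. As an
 * illustrative C-style sketch:
 *
 *   cr3 = (pgt - __START_KERNEL_map)  // pgd offset within the image
 *       + phys_base                   // physical base of the kernel map
 *       + sme_me_mask;                // C-bit modifier, 0 without SME
 *
 * i.e. cr3 = __pa(pgt) | sme_me_mask.
 */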

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and waiting
	 * for the next RET instruction.
	 * %rsi carries a pointer to the realmode data and is callee-clobbered.
	 * Save and restore it.
	 */
	pushq	%rsi
	movq	%rax, %rdi
	call	sev_verify_cbit
	popq	%rsi

	/*
	 * Switch to new page-table
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd, and unmap the
	 * identity mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4
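
/*
 * Equivalent C sketch of the flush above (illustrative): toggling
 * CR4.PGE off and back on invalidates all TLB entries, including
 * global ones, which a plain CR3 write would leave in place:
 *
 *   unsigned long cr4 = native_read_cr4();
 *   native_write_cr4(cr4 ^ X86_CR4_PGE);  // drop global TLB entries
 *   native_write_cr4(cr4);                // restore the original CR4
 */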

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR // above

#ifdef CONFIG_SMP
	movl	smpboot_control(%rip), %ecx

	/* Get the per-cpu offset for the given CPU# which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */

	/*
	 * Setup a boot time stack - Any secondary CPU will have lost its stack
	 * by now because the CR3 switch above unmaps the real-mode stack.
	 *
	 * RDX contains the per-cpu offset
	 */
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32-bit mode we couldn't load a 64-bit linear address.
	 */
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp
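
/*
 * The descriptor built on the stack above has the layout the LGDT
 * instruction expects in 64-bit mode:
 *
 *   offset 0: 16-bit limit (GDT_SIZE - 1)
 *   offset 2: 64-bit linear base address (this CPU's gdt_page)
 *
 * 16 bytes are reserved although only 10 are used, presumably to keep
 * %rsp alignment intact across the sequence.
 */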

	/* set up data segments */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %ss
	movl	%eax, %es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax, %fs
	movl	%eax, %gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE, %ecx
	movl	initial_gs(%rip), %eax
	movl	initial_gs+4(%rip), %edx
	wrmsr
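
/*
 * WRMSR takes the 64-bit value split across EDX:EAX (high:low), which
 * is why the two 32-bit halves of initial_gs are loaded separately
 * above. A C-level sketch of the net effect (illustrative):
 *
 *   wrmsrl(MSR_GS_BASE, initial_gs);  // %gs base -> fixed_percpu_data
 */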

	/* Setup and Load IDT */
	pushq	%rsi
	call	early_setup_idt
	popq	%rsi

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx, %edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve the current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl	%eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20, %edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX, early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
	xorl	%edx, %edx
	wrmsr				/* Make changes effective */
1:
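
/*
 * C sketch of the EFER update above (illustrative; nx_supported stands
 * for CPUID 0x80000001 EDX bit 20, tested with btl above):
 *
 *   u64 efer = __rdmsr(MSR_EFER);
 *   u64 new  = efer | EFER_SCE | (nx_supported ? EFER_NX : 0);
 *   if (new != efer)                  // skip the write for TDX guests
 *           native_wrmsrl(MSR_EFER, new);
 */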
	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * %rsi is a pointer to the real-mode structure with interesting
	 * info; pass it to C.
	 */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64-bit address; this is only possible with an indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64-bit offset.
	 *
	 * AMD does not support far jump indirect with 64-bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support the 64-bit offset.
	 * Software Developer Manual Vol 2 states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
SYM_CODE_START(start_cpu0)
	ANNOTATE_NOENDBR
	UNWIND_HINT_EMPTY

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(start_cpu0)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	ANNOTATE_UNRET_END

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]
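
/*
 * The .rept above emits NUM_EXCEPTION_VECTORS small stubs. Each stub
 * pushes a dummy error code when the hardware does not supply one (so
 * every stack frame looks the same), pushes its vector number, and
 * jumps to the common handler. The .fill pads every stub to exactly
 * EARLY_IDT_HANDLER_SIZE bytes with 0xcc (int3), so IDT setup code can
 * compute each entry point as array + vector * EARLY_IDT_HANDLER_SIZE.
 */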

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	ANNOTATE_UNRET_END
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call do_early_exception

	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)
50474d8d9d5SJoerg Roedel#ifdef CONFIG_AMD_MEM_ENCRYPT
50574d8d9d5SJoerg Roedel/*
50674d8d9d5SJoerg Roedel * VC Exception handler used during very early boot. The
50774d8d9d5SJoerg Roedel * early_idt_handler_array can't be used because it returns via the
50874d8d9d5SJoerg Roedel * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
50974d8d9d5SJoerg Roedel *
5108b87d8ceSPeter Zijlstra * XXX it does, fix this.
5118b87d8ceSPeter Zijlstra *
51274d8d9d5SJoerg Roedel * This handler will end up in the .init.text section and not be
51374d8d9d5SJoerg Roedel * available to boot secondary CPUs.
51474d8d9d5SJoerg Roedel */
51574d8d9d5SJoerg RoedelSYM_CODE_START_NOALIGN(vc_no_ghcb)
51674d8d9d5SJoerg Roedel	UNWIND_HINT_IRET_REGS offset=8
517e8d61bdfSPeter Zijlstra	ENDBR
51874d8d9d5SJoerg Roedel
519a09a6e23SPeter Zijlstra	ANNOTATE_UNRET_END
520a09a6e23SPeter Zijlstra
52174d8d9d5SJoerg Roedel	/* Build pt_regs */
52274d8d9d5SJoerg Roedel	PUSH_AND_CLEAR_REGS
52374d8d9d5SJoerg Roedel
52474d8d9d5SJoerg Roedel	/* Call C handler */
52574d8d9d5SJoerg Roedel	movq    %rsp, %rdi
52674d8d9d5SJoerg Roedel	movq	ORIG_RAX(%rsp), %rsi
52774d8d9d5SJoerg Roedel	call    do_vc_no_ghcb
52874d8d9d5SJoerg Roedel
52974d8d9d5SJoerg Roedel	/* Unwind pt_regs */
53074d8d9d5SJoerg Roedel	POP_REGS
53174d8d9d5SJoerg Roedel
53274d8d9d5SJoerg Roedel	/* Remove Error Code */
53374d8d9d5SJoerg Roedel	addq    $8, %rsp
53474d8d9d5SJoerg Roedel
53574d8d9d5SJoerg Roedel	/* Pure iret required here - don't use INTERRUPT_RETURN */
53674d8d9d5SJoerg Roedel	iretq
53774d8d9d5SJoerg RoedelSYM_CODE_END(vc_no_ghcb)
53874d8d9d5SJoerg Roedel#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
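
/*
 * Illustrative expansion: with PMD_SHIFT = 21 (2 MiB pages),
 * PMDS(0, perm, 3) assembles to three consecutive large-page entries:
 *
 *   .quad 0 + (0 << 21) + perm
 *   .quad 0 + (1 << 21) + perm
 *   .quad 0 + (2 << 21) + perm
 */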

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or it is enabled.  But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
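
/*
 * Illustrative arithmetic: with KERNEL_IMAGE_SIZE = 512 MiB and
 * PMD_SIZE = 2 MiB, the PMDS() above emits 256 entries; with
 * RANDOMIZE_BASE (1 GiB of image space) it emits the full 512.
 */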

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)
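
/*
 * Illustrative layout, assuming FIXMAP_PMD_NUM = 2 (its usual value):
 * 506 empty entries, then 2 entries pointing at the level1_fixmap_pgt
 * pages, then the final 4 reserved entries filled above, totalling the
 * 512 slots of one PMD page.
 */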

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(smpboot_control,		.long 0)

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)