/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
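/*
 * Worked example (assuming the usual __START_KERNEL_map of
 * 0xffffffff80000000): l4_index() evaluates to 511 and pud_index() to 510,
 * matching the "= 511" / "= 510" slot comments in the page tables below.
 */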

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - FRAME_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	startup_64_setup_env
	popq	%rsi

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_EMPTY

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
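	/*
	 * %rax now holds (early_top_pgt - __START_KERNEL_map) plus the SME
	 * encryption mask returned by __startup_64(); phys_base is added on
	 * the common path below before the value is loaded into %cr3.
	 */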
	jmp 1f
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	movq	%cr4, %rcx
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif

	/* Enable PAE mode, PGE and LA57 */
	orl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and waiting
	 * for the next RET instruction.
	 * %rsi carries a pointer to the realmode data and is callee-clobbered.
	 * Save and restore it.
	 */
	pushq	%rsi
	movq	%rax, %rdi
	call	sev_verify_cbit
	popq	%rsi

	/*
	 * Switch to the new page-table.
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd, and unmap the
	 * identity mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4
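	/*
	 * Note: toggling CR4.PGE off and back on invalidates all TLB entries,
	 * including global ones, which the CR3 write above would otherwise
	 * leave in place.
	 */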

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR // above

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we are currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per cpu areas are set up.
	 */
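	/*
	 * WRMSR takes the MSR index in %ecx and the 64-bit value in
	 * %edx:%eax, which is why initial_gs is loaded below as two
	 * 32-bit halves.
	 */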
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/*
	 * Set up a boot time stack - any secondary CPU will have lost its
	 * stack by now because the CR3 switch above unmaps the real-mode
	 * stack.
	 */
	movq initial_stack(%rip), %rsp

	/* Set up and load the IDT */
	pushq	%rsi
	call	early_setup_idt
	popq	%rsi

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi
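	/*
	 * CPUID leaf 0x80000001 reports the No-Execute capability in
	 * %edx bit 20, hence the "btl $20, %edi" test below.
	 */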

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve the current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl    %eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/* %rsi is a pointer to the real mode structure with interesting
	   info; pass it to C. */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to a far jump indirect with a 64bit offset.
	 *
	 * AMD does not support far jump indirect with a 64bit offset.
	 * The AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support a 64bit offset.
	 * The Software Developer's Manual, Vol 2, states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
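	/*
	 * The lretq below pops %rip and then %cs, so the stack built here is,
	 * top to bottom: initial_code, __KERNEL_CS, .Lafter_lret.  The
	 * .Lafter_lret entry only gives the unwinder a return address and is
	 * never executed through.
	 */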
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except the stack. We just set up the stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
SYM_CODE_START(start_cpu0)
	UNWIND_HINT_EMPTY
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
SYM_CODE_END(start_cpu0)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq    %rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq    $8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

/*
 * The FRAME_SIZE gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]
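/*
 * Each stub above is padded with 0xcc (int3) up to EARLY_IDT_HANDLER_SIZE
 * bytes, so the early IDT setup code can find stub N at
 * early_idt_handler_array + N * EARLY_IDT_HANDLER_SIZE.
 */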

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
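	/*
	 * Concretely: 0(%rsp) = vector number, 8(%rsp) = error code,
	 * 16(%rsp) = hardware IRET frame.  The pushes below turn this into a
	 * struct pt_regs, recycling the vector slot as pt_regs->di once the
	 * vector number has been moved into %rsi.
	 */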
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call do_early_exception

	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq    %rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call    do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq    $8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
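/*
 * For illustration: PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 3) would emit
 * three .quad entries for 0, 2 MiB and 4 MiB (PMD_SHIFT == 21), each with
 * the given permission bits added in.
 */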

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or it is enabled.  But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
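	/*
	 * With 2 MiB PMD entries this emits KERNEL_IMAGE_SIZE/PMD_SIZE
	 * mappings: 512 entries (a full table) for the 1 GiB image size used
	 * with RANDOMIZE_BASE, 256 entries otherwise.
	 */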
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2 MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)