/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

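/*
 * For example, with __START_KERNEL_map = 0xffffffff80000000 these evaluate
 * to L4_START_KERNEL = 511 and L3_START_KERNEL = 510, matching the slots
 * populated in init_top_pgt and level3_kernel_pgt below.
 */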
	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64-bit mode (CS.L = 1, CS.D = 0),
	 * and someone has loaded an identity-mapped page table
	 * for us.  These identity-mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64-bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from the one we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu() */
	leaq	(__end_init_task - PTREGS_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi

	/*
	 * initial_gs points to initial fixed_percpu_data struct with storage for
	 * the stack protector canary. Global pointer fixups are needed at this
	 * stage, so apply them as is done in fixup_pointer(), and initialize %gs
	 * such that the canary can be accessed at %gs:40 for subsequent C calls.
	 */
	movl	$MSR_GS_BASE, %ecx
	movq	initial_gs(%rip), %rax
	movq	$_text, %rdx
	subq	%rdx, %rax
	addq	%rdi, %rax
	movq	%rax, %rdx
	shrq	$32,  %rdx
	wrmsr

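	/*
	 * %rsi holds the pointer to real_mode_data and is a caller-saved
	 * register in the C calling convention, so it is pushed/popped
	 * around the C calls below.
	 */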
	pushq	%rsi
	call	startup_64_setup_env
	popq	%rsi

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This must be
	 * done now, since it also sets up the SEV-SNP CPUID table, which needs
	 * to be in place before any CPUID instructions are executed in
	 * subsequent code.
	 */
	movq	%rsi, %rdi
	pushq	%rsi
	call	sme_enable
	popq	%rsi
#endif

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_EMPTY

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value, being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64-bit mode (CS.L = 1, CS.D = 0),
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity-mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif

	/* Form the CR3 value, being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:
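	/*
	 * The boot CPU jumps here from startup_64 with its CR3 value (based
	 * on early_top_pgt) already formed in %rax; secondary CPUs fall
	 * through from above using init_top_pgt. phys_base is added to %rax
	 * below in both cases.
	 */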

#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	movq	%cr4, %rcx
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif

	/* Enable PAE mode, PGE and LA57 */
	orl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Set up early boot stage 4-/5-level pagetables. */
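	/*
	 * %rax currently holds pagetable - __START_KERNEL_map (plus the SME
	 * mask); adding phys_base turns it into the physical address that can
	 * be loaded into CR3.
	 */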
	addq	phys_base(%rip), %rax

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and waiting
	 * for the next RET instruction.
	 * %rsi carries a pointer to the realmode data and is callee-clobbered.
	 * Save and restore it.
	 */
	pushq	%rsi
	movq	%rax, %rdi
	call	sev_verify_cbit
	popq	%rsi

	/*
	 * Switch to the new page table.
	 *
	 * For the boot CPU this switches to early_top_pgt, which still has the
	 * identity mappings present. The secondary CPUs switch to init_top_pgt
	 * here, away from trampoline_pgd, and unmap the identity-mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
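	/*
	 * Toggling CR4.PGE flushes all TLB entries, including global ones,
	 * which the CR3 write above would otherwise leave in place.
	 */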
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR // above

#ifdef CONFIG_SMP
	movl	smpboot_control(%rip), %ecx

	/* Get the per cpu offset for the given CPU# which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */

	/*
	 * Set up a boot-time stack - any secondary CPU will have lost its
	 * stack by now because the CR3 switch above unmaps the real-mode
	 * stack.
	 *
	 * RDX contains the per-cpu offset.
	 */
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp

	/*
	 * We must switch to a new GDT descriptor in kernel space because the
	 * kernel will soon no longer have access to the userspace addresses
	 * we are currently running on. We have to do that here because in
	 * 32-bit mode we couldn't load a 64-bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-CPU areas are set up.
	 */
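	/*
	 * wrmsr takes the 64-bit MSR value split across EDX:EAX, hence the
	 * two 32-bit loads from initial_gs below.
	 */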
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* Set up and load the IDT */
	pushq	%rsi
	call	early_setup_idt
	popq	%rsi

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Set up EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve the current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guests).
	 */
	movl	%eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guests) */
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	/* Set up CR0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * %rsi is a pointer to the real-mode structure with interesting info;
	 * pass it to C.
	 */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64-bit address; this is only possible with an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to a far jump indirect with a 64-bit offset.
	 *
	 * AMD does not support far jump indirect with a 64-bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel 64 does support a 64-bit offset.
	 * Software Developer's Manual, Vol 2, states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except the stack. We just set up the stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
SYM_CODE_START(start_cpu0)
	ANNOTATE_NOENDBR
	UNWIND_HINT_EMPTY

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(start_cpu0)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	ANNOTATE_UNRET_END

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

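	/*
	 * The C handler gets a pointer to the pt_regs frame in %rdi and the
	 * exception error code, saved in the ORIG_RAX slot, in %rsi.
	 */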
	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
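	/*
	 * Each vector gets a stub of fixed size EARLY_IDT_HANDLER_SIZE,
	 * padded with 0xcc (int3) below, so the stub for vector i sits at
	 * early_idt_handler_array + i * EARLY_IDT_HANDLER_SIZE.
	 */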
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	ANNOTATE_UNRET_END
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call do_early_exception

	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	ANNOTATE_UNRET_END

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
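/*
 * PMDS(START, PERM, COUNT) emits COUNT consecutive PMD entries, each mapping
 * a 2 MiB large page, starting at physical address START with permission
 * bits PERM. For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC,
 * PTRS_PER_PMD) below maps the first 1 GiB with 512 large-page entries.
 */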

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

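/*
 * The Xen PV/PVH entry paths rely on init_top_pgt already containing the
 * identity and kernel mappings, so it is pre-populated here; otherwise it
 * starts out zeroed and is filled at runtime.
 */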
#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or whether it is enabled.  But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2 MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))
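/*
 * The .word limit and .quad base above form the 10-byte pseudo-descriptor
 * (16-bit limit, 64-bit base) consumed by the lgdt in secondary_startup_64.
 */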

	.align 16
SYM_DATA(smpboot_control,		.long 0)

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)
