/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu() */
	leaq	(__end_init_task - PTREGS_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi

	/*
	 * initial_gs points to initial fixed_percpu_data struct with storage for
	 * the stack protector canary. Global pointer fixups are needed at this
	 * stage, so apply them as is done in fixup_pointer(), and initialize %gs
	 * such that the canary can be accessed at %gs:40 for subsequent C calls.
	 */
	movl	$MSR_GS_BASE, %ecx
	movq	initial_gs(%rip), %rax
	movq	$_text, %rdx
	subq	%rdx, %rax		/* subtract the link-time address of _text... */
	addq	%rdi, %rax		/* ...and add the actual load address */
	movq	%rax, %rdx
	shrq	$32, %rdx		/* wrmsr takes the value in %edx:%eax */
	wrmsr

	pushq	%rsi
	call	startup_64_setup_env
	popq	%rsi

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
	 * which needs to be done before any CPUID instructions are executed in
	 * subsequent code.
	 */
	movq	%rsi, %rdi
	pushq	%rsi
	call	sme_enable
	popq	%rsi
#endif

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_EMPTY

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
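	/*
	 * The RIP-relative leaq below yields the actual physical load
	 * address of _text, not the (possibly different) address the
	 * kernel was linked to run at; __startup_64() uses it to rebase
	 * the page table entries.
	 */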
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp	1f		/* join the common path in secondary_startup_64 below */
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	movq	%cr4, %rcx
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif

	/* Enable PAE mode, PGE and LA57 */
	orl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and waiting
	 * for the next RET instruction.
	 * %rsi carries a pointer to the realmode data and is callee-clobbered.
	 * Save and restore it.
	 */
	pushq	%rsi
	movq	%rax, %rdi
	call	sev_verify_cbit
	popq	%rsi

	/*
	 * Switch to new page-table
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd, and unmap the
	 * identity mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4	/* toggle CR4.PGE: flushes all TLB entries */
	movq	%rax, %cr4	/* restore the original CR4 value */

	/* Ensure we are executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR // above

#ifdef CONFIG_SMP
	movl	smpboot_control(%rip), %ecx

	/* Get the per-cpu offset for the given CPU# which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx	/* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */

	/*
	 * Setup a boot time stack - Any secondary CPU will have lost its stack
	 * by now because the CR3 switch above unmaps the real-mode stack.
	 *
	 * RDX contains the per-cpu offset
	 */
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
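	/*
	 * lgdt takes a 10-byte pseudo-descriptor: a 16-bit limit followed
	 * by a 64-bit linear base address. Build it in a 16-byte scratch
	 * area on the stack.
	 */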
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* Setup and Load IDT */
	pushq	%rsi
	call	early_setup_idt
	popq	%rsi

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl	%eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * %rsi holds a pointer to the real mode structure with interesting
	 * info; pass it to C.
	 */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an indirect
	 * jump.  In addition we need to ensure %cs is set correctly, so we
	 * make this a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel 64 does support the 64bit offset.
	 * The Software Developer's Manual, Vol 2, states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
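	/*
	 * Stack layout at the lretq below, top of stack first:
	 *
	 *	initial_code	<- new RIP, popped by lretq
	 *	__KERNEL_CS	<- new CS, popped by lretq
	 *	.Lafter_lret	<- fake return address for the unwinder
	 */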
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary() via .Ljump_to_C_code.
 */
SYM_CODE_START(start_cpu0)
	ANNOTATE_NOENDBR
	UNWIND_HINT_EMPTY

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(start_cpu0)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	ANNOTATE_UNRET_END

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif
	__FINITDATA

	__INIT
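/*
 * Each stub in the array below occupies exactly EARLY_IDT_HANDLER_SIZE
 * bytes; the trailing .fill pads each entry with 0xcc (int3), so the IDT
 * setup code can locate the handler for vector i at a fixed stride of
 * i * EARLY_IDT_HANDLER_SIZE from the start of the array.
 */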
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq	$0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq	$i		# 72(%rsp) Vector number
	jmp	early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	ANNOTATE_UNRET_END
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl	early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq	%rsi			/* pt_regs->si */
	movq	8(%rsp), %rsi		/* RSI = vector number */
	movq	%rdi, 8(%rsp)		/* pt_regs->di = RDI */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	%rax			/* pt_regs->ax */
	pushq	%r8			/* pt_regs->r8 */
	pushq	%r9			/* pt_regs->r9 */
	pushq	%r10			/* pt_regs->r10 */
	pushq	%r11			/* pt_regs->r11 */
	pushq	%rbx			/* pt_regs->bx */
	pushq	%rbp			/* pt_regs->bp */
	pushq	%r12			/* pt_regs->r12 */
	pushq	%r13			/* pt_regs->r13 */
	pushq	%r14			/* pt_regs->r14 */
	pushq	%r15			/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq	%rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call	do_early_exception

	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	ANNOTATE_UNRET_END

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1:1 mapping PMD entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
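
/*
 * For example, PMDS(0, PERM, 2) emits two consecutive 2M entries:
 *	.quad 0 + (PERM)
 *	.quad PMD_SIZE + (PERM)
 */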

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or whether it is enabled.  But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
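
/*
 * Layout of the 512 entries in the table below: (512 - 4 - FIXMAP_PMD_NUM)
 * empty slots first, then FIXMAP_PMD_NUM entries pointing at the
 * level1_fixmap_pgt pages, with the top four entries left free (the
 * reserved space and hole noted at the end of the table).
 */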

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(smpboot_control,	.long 0)

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)