/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
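/*
 * Worked example: with 4-level paging, bits 47:39 of a virtual address
 * select one of the 512 top-level (PGD) entries. __START_KERNEL_map is
 * 0xffffffff80000000, so L4_START_KERNEL = l4_index(0xffffffff80000000)
 * = 511 -- the last PGD slot, matching the "= 511" remark at the
 * init_top_pgt definition below.
 */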
	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity mapped page table
	 * for us. These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - FRAME_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	startup_64_setup_env
	popq	%rsi

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs
	 * to be done now, since this also includes setup of the SEV-SNP CPUID
	 * table, which needs to be done before any CPUID instructions are
	 * executed in subsequent code.
	 */
	movq	%rsi, %rdi
	pushq	%rsi
	call	sme_enable
	popq	%rsi
#endif

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq
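/*
 * lretq pops the new RIP and then the new CS from the stack, so the two
 * pushes above form a far-return frame: execution resumes at
 * .Lon_kernel_cs with CS reloaded to __KERNEL_CS. CS can only be changed
 * by a far transfer such as this.
 */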

.Lon_kernel_cs:
	UNWIND_HINT_EMPTY

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and wait for
	 * the next RET instruction.
	 * %rsi carries pointer to realmode data and is callee-clobbered. Save
	 * and restore it.
	 */
	pushq	%rsi
	movq	%rax, %rdi
	call	sev_verify_cbit
	popq	%rsi

	/*
	 * Switch to new page-table
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd and unmap the
	 * identity mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR // above

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-CPU areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/*
	 * Setup a boot time stack - Any secondary CPU will have lost its stack
	 * by now because the cr3-switch above unmaps the real-mode stack.
	 */
	movq initial_stack(%rip), %rsp

	/* Setup and Load IDT */
	pushq	%rsi
	call	early_setup_idt
	popq	%rsi

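	/*
	 * CPUID leaf 0x80000001 returns the extended feature flags in %edx;
	 * bit 20 of %edx is the NX (No-Execute) bit. It is tested below so
	 * that EFER.NX and the NX page-table bit are only used when the CPU
	 * actually supports them.
	 */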
	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/* %rsi is a pointer to the real mode structure with interesting
	   info; pass it to C */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump. In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except stack. We just set up stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
SYM_CODE_START(start_cpu0)
	UNWIND_HINT_EMPTY
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
SYM_CODE_END(start_cpu0)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq    $8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

/*
 * The FRAME_SIZE gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]
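/*
 * Each stub emitted above is padded out to EARLY_IDT_HANDLER_SIZE bytes
 * with 0xcc (the int3 opcode), so the stub for vector N sits at the fixed
 * offset N * EARLY_IDT_HANDLER_SIZE from early_idt_handler_array and any
 * stray jump into the padding traps immediately.
 */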

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call do_early_exception

	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq    %rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call    do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq    $8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
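/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 3) expands to
 * three .quad entries for physical addresses 0, 2 MiB and 4 MiB (one
 * PMD_SHIFT step apart), each ORed with the given permission bits --
 * i.e. 2 MiB large pages mapping physical memory 1:1.
 */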

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether the CPU
	 * supports it or whether it is enabled. But, the CPU should
	 * ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)