/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode, CS.L = 1, CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - FRAME_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	startup_64_setup_env
	popq	%rsi

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_EMPTY

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64bit mode, CS.L = 1, CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */
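
	/*
	 * Note: the boot CPU does not execute the code immediately below.
	 * startup_64 above already ran verify_cpu() and __startup_64(), and
	 * it enters at the "1:" label further down (see the "jmp 1f" above)
	 * with %rax holding the early_top_pgt based CR3 value.
	 */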

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 * %rsi carries a pointer to the realmode data and is callee-clobbered.
	 * Save and restore it.
	 */
	pushq	%rsi
	movq	%rax, %rdi
	call	sev_verify_cbit
	popq	%rsi

	/*
	 * Switch to the new page-table.
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd, and unmap the
	 * identity mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR // above

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot cpu uses the init data section until
	 * the per cpu areas are set up.
	 */
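	/*
	 * WRMSR takes the MSR index in %ecx and the 64-bit value in
	 * %edx:%eax (high:low 32 bits), which is why initial_gs is
	 * loaded below as two separate 32-bit halves.
	 */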
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/*
	 * Setup a boot time stack - Any secondary CPU will have lost its stack
	 * by now because the cr3-switch above unmaps the real-mode stack
	 */
	movq initial_stack(%rip), %rsp

	/* Setup and Load IDT */
	pushq	%rsi
	call	early_setup_idt
	popq	%rsi

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/* rsi is pointer to real mode structure with interesting info.
	   pass it to C */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except stack. We just set up stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
SYM_CODE_START(start_cpu0)
	UNWIND_HINT_EMPTY
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
SYM_CODE_END(start_cpu0)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
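/*
 * On entry the CPU has already pushed the #VC error code on top of the
 * IRET frame (hence the offset=8 unwind hint below). PUSH_AND_CLEAR_REGS
 * then completes a struct pt_regs, so the error code lands in the
 * pt_regs->orig_ax slot and is read back via ORIG_RAX(%rsp) as the second
 * argument for the C handler.
 */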
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

/*
 * The FRAME_SIZE gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call do_early_exception

	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
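/*
 * At this stage no GHCB page has been set up yet, so do_vc_no_ghcb() is
 * expected to fall back to the GHCB MSR protocol to talk to the
 * hypervisor (mainly for intercepted CPUID during very early boot).
 */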
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether the CPU
	 * supports it or whether it is enabled. But the CPU should
	 * ignore the bit.
	 */
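	/*
	 * PMDS() below expands to PTRS_PER_PMD (512) large-page PMD
	 * entries; entry i maps physical address i << PMD_SHIFT
	 * (i * 2 MiB), so together they identity-map the first 1 GiB.
	 */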
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)
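
/*
 * Worked example of the index arithmetic above, assuming the default
 * __START_KERNEL_map of 0xffffffff80000000:
 *
 *   L4_START_KERNEL = l4_index(0xffffffff80000000)  = 511
 *   L3_START_KERNEL = pud_index(0xffffffff80000000) = 510
 *
 * matching the "= 511" and "= 510" notes next to init_top_pgt and
 * level3_kernel_pgt: the kernel high mapping lives in the last PGD slot
 * and the second-to-last PUD slot, with level2_fixmap_pgt in the last one.
 */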