/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)     (((x) >> 39) & 511)
#define pud_index(x)    (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
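/*
 * Worked example (editorial note; values assume the default non-KASLR
 * layout on 4-level paging): __START_KERNEL_map is 0xffffffff80000000,
 * so l4_index() yields (0xffffffff80000000 >> 39) & 511 = 511, i.e. the
 * kernel text mapping lives in the last PGD slot, and
 * pud_index(0xffffffff80000000) = 510, which is why level3_kernel_pgt
 * below skips L3_START_KERNEL = 510 entries before its two populated
 * slots. Likewise l4_index(__PAGE_OFFSET_BASE_L4 = 0xffff888000000000)
 * places the direct map at PGD slot 273.
 */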
        .text
        __HEAD
        .code64
SYM_CODE_START_NOALIGN(startup_64)
        UNWIND_HINT_EMPTY
        /*
         * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
         * and someone has loaded an identity mapped page table
         * for us.  These identity mapped page tables map all of the
         * kernel pages and possibly all of memory.
         *
         * %rsi holds a physical pointer to real_mode_data.
         *
         * We come here either directly from a 64bit bootloader, or from
         * arch/x86/boot/compressed/head_64.S.
         *
         * We only come here initially at boot; nothing else comes here.
         *
         * Since we may be loaded at an address different from what we were
         * compiled to run at, we first fix up the physical addresses in our
         * page tables and then reload them.
         */

        /* Set up the stack for verify_cpu(), similar to initial_stack below */
        leaq    (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

        leaq    _text(%rip), %rdi
        pushq   %rsi
        call    startup_64_setup_env
        popq    %rsi

        /* Now switch to __KERNEL_CS so IRET works reliably */
        pushq   $__KERNEL_CS
        leaq    .Lon_kernel_cs(%rip), %rax
        pushq   %rax
        lretq

.Lon_kernel_cs:
        UNWIND_HINT_EMPTY

        /* Sanitize CPU configuration */
        call    verify_cpu

        /*
         * Perform pagetable fixups. Additionally, if SME is active, encrypt
         * the kernel and retrieve the modifier (SME encryption mask if SME
         * is active) to be added to the initial pgdir entry that will be
         * programmed into CR3.
         */
        leaq    _text(%rip), %rdi
        pushq   %rsi
        call    __startup_64
        popq    %rsi

        /* Form the CR3 value, being sure to include the CR3 modifier */
        addq    $(early_top_pgt - __START_KERNEL_map), %rax
        /* Join the common CR4/CR3 setup at label 1 in secondary_startup_64 */
        jmp     1f
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
        UNWIND_HINT_EMPTY
        /*
         * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
         * and someone has loaded a mapped page table.
         *
         * %rsi holds a physical pointer to real_mode_data.
         *
         * We come here either from startup_64 (using physical addresses)
         * or from trampoline.S (using virtual addresses).
         *
         * Using virtual addresses from trampoline.S removes the need
         * to have any identity mapped pages in the kernel page table
         * after the boot processor executes this code.
         */

        /* Sanitize CPU configuration */
        call    verify_cpu

        /*
         * The secondary_startup_64_no_verify entry point is only used by
         * SEV-ES guests. In those guests the call to verify_cpu() would cause
         * #VC exceptions which cannot be handled at this stage of secondary
         * CPU bringup.
         *
         * All non-SEV-ES systems, especially Intel systems, need to execute
         * verify_cpu() above to make sure NX is enabled.
         */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
        UNWIND_HINT_EMPTY

        /*
         * Retrieve the modifier (SME encryption mask if SME is active) to be
         * added to the initial pgdir entry that will be programmed into CR3.
         */
        pushq   %rsi
        call    __startup_secondary_64
        popq    %rsi

        /* Form the CR3 value, being sure to include the CR3 modifier */
        addq    $(init_top_pgt - __START_KERNEL_map), %rax
1:

        /* Enable PAE mode, PGE and LA57 */
        movl    $(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
        testl   $1, __pgtable_l5_enabled(%rip)
        jz      1f
        orl     $X86_CR4_LA57, %ecx
1:
#endif
        movq    %rcx, %cr4

        /* Set up early boot stage 4-/5-level pagetables. */
        addq    phys_base(%rip), %rax
        movq    %rax, %cr3

        /* Ensure I am executing from virtual addresses */
        movq    $1f, %rax
        ANNOTATE_RETPOLINE_SAFE
        jmp     *%rax
1:
        UNWIND_HINT_EMPTY

        /*
         * We must switch to a new descriptor in kernel space for the GDT
         * because soon the kernel won't have access anymore to the userspace
         * addresses we're currently running at. We have to do that here
         * because in 32bit we couldn't load a 64bit linear address.
         */
        lgdt    early_gdt_descr(%rip)

        /* set up data segments */
        xorl    %eax,%eax
        movl    %eax,%ds
        movl    %eax,%ss
        movl    %eax,%es

        /*
         * We don't really need to load %fs or %gs, but load them anyway
         * to kill any stale realmode selectors.  This allows execution
         * under VT hardware.
         */
        movl    %eax,%fs
        movl    %eax,%gs

        /* Set up %gs.
         *
         * The base of %gs always points to fixed_percpu_data. If the
         * stack protector canary is enabled, it is located at %gs:40.
         * Note that, on SMP, the boot CPU uses the init data section until
         * the per-cpu areas are set up.
         */
        movl    $MSR_GS_BASE,%ecx
        movl    initial_gs(%rip),%eax
        movl    initial_gs+4(%rip),%edx
        wrmsr

        /*
         * Set up a boot-time stack - any secondary CPU will have lost its
         * stack by now because the cr3 switch above unmaps the real-mode
         * stack.
         */
        movq    initial_stack(%rip), %rsp

        /* Set up and load the IDT */
        pushq   %rsi
        call    early_setup_idt
        popq    %rsi

        /* Check if NX is implemented */
        movl    $0x80000001, %eax
        cpuid
        movl    %edx,%edi

        /* Setup EFER (Extended Feature Enable Register) */
        movl    $MSR_EFER, %ecx
        rdmsr
        btsl    $_EFER_SCE, %eax        /* Enable System Call */
        btl     $20,%edi                /* No Execute supported? */
        jnc     1f
        btsl    $_EFER_NX, %eax
        btsq    $_PAGE_BIT_NX,early_pmd_flags(%rip)
1:      wrmsr                           /* Make changes effective */
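        /*
         * Bit 20 of the CPUID 0x80000001 EDX leaf (saved in %edi above) is
         * the NX feature flag. EFER.NX and the NX bit in early_pmd_flags
         * are only set when the feature is present, since setting the NX
         * page-table bit on a CPU without it would fault as a reserved bit.
         */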

        /* Setup cr0 */
        movl    $CR0_STATE, %eax
        /* Make changes effective */
        movq    %rax, %cr0

        /* zero EFLAGS after setting rsp */
        pushq   $0
        popfq

        /*
         * %rsi is a pointer to the real-mode structure with interesting
         * info; pass it to C.
         */
        movq    %rsi, %rdi

.Ljump_to_C_code:
        /*
         * Jump to run C code and to be on a real kernel address.
         * Since we are running on identity-mapped space we have to jump
         * to the full 64bit address; this is only possible with an
         * indirect jump.  In addition we need to ensure %cs is set, so we
         * make this a far return.
         *
         * Note: do not change to far jump indirect with 64bit offset.
         *
         * AMD does not support far jump indirect with 64bit offset.
         * AMD64 Architecture Programmer's Manual, Volume 3: states only
         *      JMP FAR mem16:16 FF /5 Far jump indirect,
         *              with the target specified by a far pointer in memory.
         *      JMP FAR mem16:32 FF /5 Far jump indirect,
         *              with the target specified by a far pointer in memory.
         *
         * Intel64 does support 64bit offset.
         * Software Developer Manual Vol 2: states:
         *      FF /5 JMP m16:16 Jump far, absolute indirect,
         *              address given in m16:16
         *      FF /5 JMP m16:32 Jump far, absolute indirect,
         *              address given in m16:32.
         *      REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
         *              address given in m16:64.
         */
        pushq   $.Lafter_lret   # put return address on stack for unwinder
        xorl    %ebp, %ebp      # clear frame pointer
        movq    initial_code(%rip), %rax
        pushq   $__KERNEL_CS    # set correct cs
        pushq   %rax            # target address in negative space
        lretq
.Lafter_lret:
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here. Then we
 * call start_secondary() via .Ljump_to_C_code.
 */
SYM_CODE_START(start_cpu0)
        UNWIND_HINT_EMPTY
        movq    initial_stack(%rip), %rsp
        jmp     .Ljump_to_C_code
SYM_CODE_END(start_cpu0)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
        UNWIND_HINT_IRET_REGS offset=8

        /* Build pt_regs */
        PUSH_AND_CLEAR_REGS

        /* Call C handler */
        movq    %rsp, %rdi
        movq    ORIG_RAX(%rsp), %rsi
        movq    initial_vc_handler(%rip), %rax
        ANNOTATE_RETPOLINE_SAFE
        call    *%rax

        /* Unwind pt_regs */
        POP_REGS

        /* Remove Error Code */
        addq    $8, %rsp

        /* Pure iret required here - don't use INTERRUPT_RETURN */
        iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

        /* Both SMP bootup and ACPI suspend change these variables */
        __REFDATA
        .balign 8
SYM_DATA(initial_code,  .quad x86_64_start_kernel)
SYM_DATA(initial_gs,    .quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,    .quad handle_vc_boot_ghcb)
#endif

/*
 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS)
        __FINITDATA

        __INIT
SYM_CODE_START(early_idt_handler_array)
        i = 0
        .rept NUM_EXCEPTION_VECTORS
        .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
                UNWIND_HINT_IRET_REGS
                pushq $0        # Dummy error code, to make stack frame uniform
        .else
                UNWIND_HINT_IRET_REGS offset=8
        .endif
        pushq $i                # 72(%rsp) Vector number
        jmp early_idt_handler_common
        UNWIND_HINT_IRET_REGS
        i = i + 1
        .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
        .endr
        UNWIND_HINT_IRET_REGS offset=16
SYM_CODE_END(early_idt_handler_array)
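/*
 * Note: each stub above is padded with 0xcc (int3) to exactly
 * EARLY_IDT_HANDLER_SIZE bytes (see asm/segment.h), so the early IDT
 * setup code can compute the handler address for vector i as
 * early_idt_handler_array + i * EARLY_IDT_HANDLER_SIZE; the int3 fill
 * also traps instead of executing garbage should a stub ever overflow
 * its slot.
 */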

SYM_CODE_START_LOCAL(early_idt_handler_common)
        /*
         * The stack is the hardware frame, an error code or zero, and the
         * vector number.
         */
        cld

        incl early_recursion_flag(%rip)

        /* The vector number is currently in the pt_regs->di slot. */
        pushq %rsi                              /* pt_regs->si */
        movq 8(%rsp), %rsi                      /* RSI = vector number */
        movq %rdi, 8(%rsp)                      /* pt_regs->di = RDI */
        pushq %rdx                              /* pt_regs->dx */
        pushq %rcx                              /* pt_regs->cx */
        pushq %rax                              /* pt_regs->ax */
        pushq %r8                               /* pt_regs->r8 */
        pushq %r9                               /* pt_regs->r9 */
        pushq %r10                              /* pt_regs->r10 */
        pushq %r11                              /* pt_regs->r11 */
        pushq %rbx                              /* pt_regs->bx */
        pushq %rbp                              /* pt_regs->bp */
        pushq %r12                              /* pt_regs->r12 */
        pushq %r13                              /* pt_regs->r13 */
        pushq %r14                              /* pt_regs->r14 */
        pushq %r15                              /* pt_regs->r15 */
        UNWIND_HINT_REGS

        movq %rsp,%rdi          /* RDI = pt_regs; RSI is already trapnr */
        call do_early_exception

        decl early_recursion_flag(%rip)
        jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
        UNWIND_HINT_IRET_REGS offset=8

        /* Build pt_regs */
        PUSH_AND_CLEAR_REGS

        /* Call C handler */
        movq    %rsp, %rdi
        movq    ORIG_RAX(%rsp), %rsi
        call    do_vc_no_ghcb

        /* Unwind pt_regs */
        POP_REGS

        /* Remove Error Code */
        addq    $8, %rsp

        /* Pure iret required here - don't use INTERRUPT_RETURN */
        iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)                       \
        SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL       512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
        SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
        SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL       0
#endif

/* Automate the creation of 1-to-1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)                        \
        i = 0 ;                                         \
        .rept (COUNT) ;                                 \
        .quad   (START) + (i << PMD_SHIFT) + (PERM) ;   \
        i = i + 1 ;                                     \
        .endr
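/*
 * For example (editorial illustration), PMDS(0, __PAGE_KERNEL_LARGE_EXEC, 2)
 * expands to:
 *
 *      .quad 0 + (0 << PMD_SHIFT) + __PAGE_KERNEL_LARGE_EXEC
 *      .quad 0 + (1 << PMD_SHIFT) + __PAGE_KERNEL_LARGE_EXEC
 *
 * i.e. two consecutive large-page PMD entries mapping physical addresses
 * 0 and 2 MiB (PMD_SHIFT is 21 with 4k pages).
 */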

        __INITDATA
        .balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
        .fill   512,8,0
        .fill   PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
        .fill   512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

        .data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
        .org    init_top_pgt + L4_PAGE_OFFSET*8, 0
        .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
        .org    init_top_pgt + L4_START_KERNEL*8, 0
        /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
        .fill   PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
        .quad   level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
        .fill   511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
        /*
         * Since I easily can, map the first 1G.
         * Don't set NX because code runs from these pages.
         *
         * Note: This sets _PAGE_GLOBAL regardless of whether the CPU
         * supports it or whether it is enabled.  But the CPU should
         * ignore the bit.
         */
        PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
        .fill   512,8,0
        .fill   PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
        .fill   511,8,0
        .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
        .fill   L3_START_KERNEL,8,0
        /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
        .quad   level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
        .quad   level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
        /*
         * 512 MB kernel mapping. We spend a full page on this pagetable
         * anyway.
         *
         * The kernel code+data+bss must not be bigger than that.
         *
         * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
         *  If you want to increase this then increase MODULES_VADDR
         *  too.)
         *
         * This table is eventually used by the kernel during normal
         * runtime.  Care must be taken to clear out undesired bits
         * later, like _PAGE_RW or _PAGE_GLOBAL in some cases.
         */
        PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
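/*
 * Editorial note on the arithmetic above: with the default
 * KERNEL_IMAGE_SIZE of 512 MiB and 2 MiB PMD entries, the PMDS()
 * invocation emits 512M / 2M = 256 entries and the remaining 256 slots
 * of this page stay zero. (With CONFIG_RANDOMIZE_BASE the image size
 * limit, and hence the entry count, may differ.)
 */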

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
        .fill   (512 - 4 - FIXMAP_PMD_NUM),8,0
        pgtno = 0
        .rept (FIXMAP_PMD_NUM)
        .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
                + _PAGE_TABLE_NOENC;
        pgtno = pgtno + 1
        .endr
        /* 6 MB reserved space + a 2 MB hole */
        .fill   4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
        .rept (FIXMAP_PMD_NUM)
        .fill   512,8,0
        .endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

        .data
        .align 16

SYM_DATA(early_gdt_descr,               .word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,    .quad INIT_PER_CPU_VAR(gdt_page))

        .align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

        __PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
        .skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)