1/* SPDX-License-Identifier: GPL-2.0 */ 2/* 3 * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit 4 * 5 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> 7 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de> 8 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de> 9 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com> 10 */ 11 12 13#include <linux/linkage.h> 14#include <linux/threads.h> 15#include <linux/init.h> 16#include <linux/pgtable.h> 17#include <asm/segment.h> 18#include <asm/page.h> 19#include <asm/msr.h> 20#include <asm/cache.h> 21#include <asm/processor-flags.h> 22#include <asm/percpu.h> 23#include <asm/nops.h> 24#include "../entry/calling.h" 25#include <asm/export.h> 26#include <asm/nospec-branch.h> 27#include <asm/fixmap.h> 28 29#ifdef CONFIG_PARAVIRT_XXL 30#include <asm/asm-offsets.h> 31#include <asm/paravirt.h> 32#define GET_CR2_INTO(reg) GET_CR2_INTO_AX ; _ASM_MOV %_ASM_AX, reg 33#else 34#define INTERRUPT_RETURN iretq 35#define GET_CR2_INTO(reg) _ASM_MOV %cr2, reg 36#endif 37 38/* 39 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE 40 * because we need identity-mapped pages. 41 */ 42#define l4_index(x) (((x) >> 39) & 511) 43#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) 44 45L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4) 46L4_START_KERNEL = l4_index(__START_KERNEL_map) 47 48L3_START_KERNEL = pud_index(__START_KERNEL_map) 49 50 .text 51 __HEAD 52 .code64 53SYM_CODE_START_NOALIGN(startup_64) 54 UNWIND_HINT_EMPTY 55 /* 56 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, 57 * and someone has loaded an identity mapped page table 58 * for us. These identity mapped page tables map all of the 59 * kernel pages and possibly all of memory. 60 * 61 * %rsi holds a physical pointer to real_mode_data. 62 * 63 * We come here either directly from a 64bit bootloader, or from 64 * arch/x86/boot/compressed/head_64.S. 65 * 66 * We only come here initially at boot nothing else comes here. 67 * 68 * Since we may be loaded at an address different from what we were 69 * compiled to run at we first fixup the physical addresses in our page 70 * tables and then reload them. 71 */ 72 73 /* Set up the stack for verify_cpu(), similar to initial_stack below */ 74 leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp 75 76 leaq _text(%rip), %rdi 77 pushq %rsi 78 call startup_64_setup_env 79 popq %rsi 80 81 /* Now switch to __KERNEL_CS so IRET works reliably */ 82 pushq $__KERNEL_CS 83 leaq .Lon_kernel_cs(%rip), %rax 84 pushq %rax 85 lretq 86 87.Lon_kernel_cs: 88 UNWIND_HINT_EMPTY 89 90 /* Sanitize CPU configuration */ 91 call verify_cpu 92 93 /* 94 * Perform pagetable fixups. Additionally, if SME is active, encrypt 95 * the kernel and retrieve the modifier (SME encryption mask if SME 96 * is active) to be added to the initial pgdir entry that will be 97 * programmed into CR3. 98 */ 99 leaq _text(%rip), %rdi 100 pushq %rsi 101 call __startup_64 102 popq %rsi 103 104 /* Form the CR3 value being sure to include the CR3 modifier */ 105 addq $(early_top_pgt - __START_KERNEL_map), %rax 106 jmp 1f 107SYM_CODE_END(startup_64) 108 109SYM_CODE_START(secondary_startup_64) 110 UNWIND_HINT_EMPTY 111 /* 112 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, 113 * and someone has loaded a mapped page table. 114 * 115 * %rsi holds a physical pointer to real_mode_data. 116 * 117 * We come here either from startup_64 (using physical addresses) 118 * or from trampoline.S (using virtual addresses). 119 * 120 * Using virtual addresses from trampoline.S removes the need 121 * to have any identity mapped pages in the kernel page table 122 * after the boot processor executes this code. 123 */ 124 125 /* Sanitize CPU configuration */ 126 call verify_cpu 127 128 /* 129 * Retrieve the modifier (SME encryption mask if SME is active) to be 130 * added to the initial pgdir entry that will be programmed into CR3. 131 */ 132 pushq %rsi 133 call __startup_secondary_64 134 popq %rsi 135 136 /* Form the CR3 value being sure to include the CR3 modifier */ 137 addq $(init_top_pgt - __START_KERNEL_map), %rax 1381: 139 140 /* Enable PAE mode, PGE and LA57 */ 141 movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx 142#ifdef CONFIG_X86_5LEVEL 143 testl $1, __pgtable_l5_enabled(%rip) 144 jz 1f 145 orl $X86_CR4_LA57, %ecx 1461: 147#endif 148 movq %rcx, %cr4 149 150 /* Setup early boot stage 4-/5-level pagetables. */ 151 addq phys_base(%rip), %rax 152 movq %rax, %cr3 153 154 /* Ensure I am executing from virtual addresses */ 155 movq $1f, %rax 156 ANNOTATE_RETPOLINE_SAFE 157 jmp *%rax 1581: 159 UNWIND_HINT_EMPTY 160 161 /* 162 * We must switch to a new descriptor in kernel space for the GDT 163 * because soon the kernel won't have access anymore to the userspace 164 * addresses where we're currently running on. We have to do that here 165 * because in 32bit we couldn't load a 64bit linear address. 166 */ 167 lgdt early_gdt_descr(%rip) 168 169 /* set up data segments */ 170 xorl %eax,%eax 171 movl %eax,%ds 172 movl %eax,%ss 173 movl %eax,%es 174 175 /* 176 * We don't really need to load %fs or %gs, but load them anyway 177 * to kill any stale realmode selectors. This allows execution 178 * under VT hardware. 179 */ 180 movl %eax,%fs 181 movl %eax,%gs 182 183 /* Set up %gs. 184 * 185 * The base of %gs always points to fixed_percpu_data. If the 186 * stack protector canary is enabled, it is located at %gs:40. 187 * Note that, on SMP, the boot cpu uses init data section until 188 * the per cpu areas are set up. 189 */ 190 movl $MSR_GS_BASE,%ecx 191 movl initial_gs(%rip),%eax 192 movl initial_gs+4(%rip),%edx 193 wrmsr 194 195 /* 196 * Setup a boot time stack - Any secondary CPU will have lost its stack 197 * by now because the cr3-switch above unmaps the real-mode stack 198 */ 199 movq initial_stack(%rip), %rsp 200 201 /* Check if nx is implemented */ 202 movl $0x80000001, %eax 203 cpuid 204 movl %edx,%edi 205 206 /* Setup EFER (Extended Feature Enable Register) */ 207 movl $MSR_EFER, %ecx 208 rdmsr 209 btsl $_EFER_SCE, %eax /* Enable System Call */ 210 btl $20,%edi /* No Execute supported? */ 211 jnc 1f 212 btsl $_EFER_NX, %eax 213 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip) 2141: wrmsr /* Make changes effective */ 215 216 /* Setup cr0 */ 217 movl $CR0_STATE, %eax 218 /* Make changes effective */ 219 movq %rax, %cr0 220 221 /* zero EFLAGS after setting rsp */ 222 pushq $0 223 popfq 224 225 /* rsi is pointer to real mode structure with interesting info. 226 pass it to C */ 227 movq %rsi, %rdi 228 229.Ljump_to_C_code: 230 /* 231 * Jump to run C code and to be on a real kernel address. 232 * Since we are running on identity-mapped space we have to jump 233 * to the full 64bit address, this is only possible as indirect 234 * jump. In addition we need to ensure %cs is set so we make this 235 * a far return. 236 * 237 * Note: do not change to far jump indirect with 64bit offset. 238 * 239 * AMD does not support far jump indirect with 64bit offset. 240 * AMD64 Architecture Programmer's Manual, Volume 3: states only 241 * JMP FAR mem16:16 FF /5 Far jump indirect, 242 * with the target specified by a far pointer in memory. 243 * JMP FAR mem16:32 FF /5 Far jump indirect, 244 * with the target specified by a far pointer in memory. 245 * 246 * Intel64 does support 64bit offset. 247 * Software Developer Manual Vol 2: states: 248 * FF /5 JMP m16:16 Jump far, absolute indirect, 249 * address given in m16:16 250 * FF /5 JMP m16:32 Jump far, absolute indirect, 251 * address given in m16:32. 252 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect, 253 * address given in m16:64. 254 */ 255 pushq $.Lafter_lret # put return address on stack for unwinder 256 xorl %ebp, %ebp # clear frame pointer 257 movq initial_code(%rip), %rax 258 pushq $__KERNEL_CS # set correct cs 259 pushq %rax # target address in negative space 260 lretq 261.Lafter_lret: 262SYM_CODE_END(secondary_startup_64) 263 264#include "verify_cpu.S" 265 266#ifdef CONFIG_HOTPLUG_CPU 267/* 268 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set 269 * up already except stack. We just set up stack here. Then call 270 * start_secondary() via .Ljump_to_C_code. 271 */ 272SYM_CODE_START(start_cpu0) 273 UNWIND_HINT_EMPTY 274 movq initial_stack(%rip), %rsp 275 jmp .Ljump_to_C_code 276SYM_CODE_END(start_cpu0) 277#endif 278 279 /* Both SMP bootup and ACPI suspend change these variables */ 280 __REFDATA 281 .balign 8 282SYM_DATA(initial_code, .quad x86_64_start_kernel) 283SYM_DATA(initial_gs, .quad INIT_PER_CPU_VAR(fixed_percpu_data)) 284 285/* 286 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder 287 * reliably detect the end of the stack. 288 */ 289SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS) 290 __FINITDATA 291 292 __INIT 293SYM_CODE_START(early_idt_handler_array) 294 i = 0 295 .rept NUM_EXCEPTION_VECTORS 296 .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 297 UNWIND_HINT_IRET_REGS 298 pushq $0 # Dummy error code, to make stack frame uniform 299 .else 300 UNWIND_HINT_IRET_REGS offset=8 301 .endif 302 pushq $i # 72(%rsp) Vector number 303 jmp early_idt_handler_common 304 UNWIND_HINT_IRET_REGS 305 i = i + 1 306 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc 307 .endr 308 UNWIND_HINT_IRET_REGS offset=16 309SYM_CODE_END(early_idt_handler_array) 310 311SYM_CODE_START_LOCAL(early_idt_handler_common) 312 /* 313 * The stack is the hardware frame, an error code or zero, and the 314 * vector number. 315 */ 316 cld 317 318 incl early_recursion_flag(%rip) 319 320 /* The vector number is currently in the pt_regs->di slot. */ 321 pushq %rsi /* pt_regs->si */ 322 movq 8(%rsp), %rsi /* RSI = vector number */ 323 movq %rdi, 8(%rsp) /* pt_regs->di = RDI */ 324 pushq %rdx /* pt_regs->dx */ 325 pushq %rcx /* pt_regs->cx */ 326 pushq %rax /* pt_regs->ax */ 327 pushq %r8 /* pt_regs->r8 */ 328 pushq %r9 /* pt_regs->r9 */ 329 pushq %r10 /* pt_regs->r10 */ 330 pushq %r11 /* pt_regs->r11 */ 331 pushq %rbx /* pt_regs->bx */ 332 pushq %rbp /* pt_regs->bp */ 333 pushq %r12 /* pt_regs->r12 */ 334 pushq %r13 /* pt_regs->r13 */ 335 pushq %r14 /* pt_regs->r14 */ 336 pushq %r15 /* pt_regs->r15 */ 337 UNWIND_HINT_REGS 338 339 cmpq $14,%rsi /* Page fault? */ 340 jnz 10f 341 GET_CR2_INTO(%rdi) /* can clobber %rax if pv */ 342 call early_make_pgtable 343 andl %eax,%eax 344 jz 20f /* All good */ 345 34610: 347 movq %rsp,%rdi /* RDI = pt_regs; RSI is already trapnr */ 348 call early_fixup_exception 349 35020: 351 decl early_recursion_flag(%rip) 352 jmp restore_regs_and_return_to_kernel 353SYM_CODE_END(early_idt_handler_common) 354 355 356#define SYM_DATA_START_PAGE_ALIGNED(name) \ 357 SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE) 358 359#ifdef CONFIG_PAGE_TABLE_ISOLATION 360/* 361 * Each PGD needs to be 8k long and 8k aligned. We do not 362 * ever go out to userspace with these, so we do not 363 * strictly *need* the second page, but this allows us to 364 * have a single set_pgd() implementation that does not 365 * need to worry about whether it has 4k or 8k to work 366 * with. 367 * 368 * This ensures PGDs are 8k long: 369 */ 370#define PTI_USER_PGD_FILL 512 371/* This ensures they are 8k-aligned: */ 372#define SYM_DATA_START_PTI_ALIGNED(name) \ 373 SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE) 374#else 375#define SYM_DATA_START_PTI_ALIGNED(name) \ 376 SYM_DATA_START_PAGE_ALIGNED(name) 377#define PTI_USER_PGD_FILL 0 378#endif 379 380/* Automate the creation of 1 to 1 mapping pmd entries */ 381#define PMDS(START, PERM, COUNT) \ 382 i = 0 ; \ 383 .rept (COUNT) ; \ 384 .quad (START) + (i << PMD_SHIFT) + (PERM) ; \ 385 i = i + 1 ; \ 386 .endr 387 388 __INITDATA 389 .balign 4 390 391SYM_DATA_START_PTI_ALIGNED(early_top_pgt) 392 .fill 512,8,0 393 .fill PTI_USER_PGD_FILL,8,0 394SYM_DATA_END(early_top_pgt) 395 396SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts) 397 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 398SYM_DATA_END(early_dynamic_pgts) 399 400SYM_DATA(early_recursion_flag, .long 0) 401 402 .data 403 404#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH) 405SYM_DATA_START_PTI_ALIGNED(init_top_pgt) 406 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC 407 .org init_top_pgt + L4_PAGE_OFFSET*8, 0 408 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC 409 .org init_top_pgt + L4_START_KERNEL*8, 0 410 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ 411 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC 412 .fill PTI_USER_PGD_FILL,8,0 413SYM_DATA_END(init_top_pgt) 414 415SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt) 416 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC 417 .fill 511, 8, 0 418SYM_DATA_END(level3_ident_pgt) 419SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt) 420 /* 421 * Since I easily can, map the first 1G. 422 * Don't set NX because code runs from these pages. 423 * 424 * Note: This sets _PAGE_GLOBAL despite whether 425 * the CPU supports it or it is enabled. But, 426 * the CPU should ignore the bit. 427 */ 428 PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) 429SYM_DATA_END(level2_ident_pgt) 430#else 431SYM_DATA_START_PTI_ALIGNED(init_top_pgt) 432 .fill 512,8,0 433 .fill PTI_USER_PGD_FILL,8,0 434SYM_DATA_END(init_top_pgt) 435#endif 436 437#ifdef CONFIG_X86_5LEVEL 438SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt) 439 .fill 511,8,0 440 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC 441SYM_DATA_END(level4_kernel_pgt) 442#endif 443 444SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt) 445 .fill L3_START_KERNEL,8,0 446 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */ 447 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC 448 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC 449SYM_DATA_END(level3_kernel_pgt) 450 451SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt) 452 /* 453 * 512 MB kernel mapping. We spend a full page on this pagetable 454 * anyway. 455 * 456 * The kernel code+data+bss must not be bigger than that. 457 * 458 * (NOTE: at +512MB starts the module area, see MODULES_VADDR. 459 * If you want to increase this then increase MODULES_VADDR 460 * too.) 461 * 462 * This table is eventually used by the kernel during normal 463 * runtime. Care must be taken to clear out undesired bits 464 * later, like _PAGE_RW or _PAGE_GLOBAL in some cases. 465 */ 466 PMDS(0, __PAGE_KERNEL_LARGE_EXEC, 467 KERNEL_IMAGE_SIZE/PMD_SIZE) 468SYM_DATA_END(level2_kernel_pgt) 469 470SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt) 471 .fill (512 - 4 - FIXMAP_PMD_NUM),8,0 472 pgtno = 0 473 .rept (FIXMAP_PMD_NUM) 474 .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \ 475 + _PAGE_TABLE_NOENC; 476 pgtno = pgtno + 1 477 .endr 478 /* 6 MB reserved space + a 2MB hole */ 479 .fill 4,8,0 480SYM_DATA_END(level2_fixmap_pgt) 481 482SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt) 483 .rept (FIXMAP_PMD_NUM) 484 .fill 512,8,0 485 .endr 486SYM_DATA_END(level1_fixmap_pgt) 487 488#undef PMDS 489 490 .data 491 .align 16 492 493SYM_DATA(early_gdt_descr, .word GDT_ENTRIES*8-1) 494SYM_DATA_LOCAL(early_gdt_descr_base, .quad INIT_PER_CPU_VAR(gdt_page)) 495 496 .align 16 497/* This must match the first entry in level2_kernel_pgt */ 498SYM_DATA(phys_base, .quad 0x0) 499EXPORT_SYMBOL(phys_base) 500 501#include "../../x86/xen/xen-head.S" 502 503 __PAGE_ALIGNED_BSS 504SYM_DATA_START_PAGE_ALIGNED(empty_zero_page) 505 .skip PAGE_SIZE 506SYM_DATA_END(empty_zero_page) 507EXPORT_SYMBOL(empty_zero_page) 508 509