/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#endif
#ifndef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * init data section till per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* %rsi is a pointer to the real mode structure with interesting info.
	   Pass it to C. */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
END(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except stack. We just set up stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip), %rsp
	UNWIND_HINT_EMPTY
	jmp	.Ljump_to_C_code
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	/*
	 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
	 * unwinder reliably detect the end of the stack.
	 */
	.quad	init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
	__FINITDATA

	__INIT
ENTRY(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
	UNWIND_HINT_IRET_REGS offset=16
END(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
END(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define NEXT_PGD_PAGE(name) \
	.balign 2 * PAGE_SIZE; \
GLOBAL(name)
#else
#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

	__INITDATA
NEXT_PGD_PAGE(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
NEXT_PGD_PAGE(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or it is enabled.  But the CPU
	 * should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#else
NEXT_PGD_PAGE(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
#endif

#ifdef CONFIG_X86_5LEVEL
NEXT_PAGE(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 *
	 *  This table is eventually used by the kernel during normal
	 *  runtime.  Care must be taken to clear out undesired bits
	 *  later, like _PAGE_RW or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)