/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 * head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there. This is also
 * useful for future device drivers that need to access the BIOS via VM86
 * mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.code32
	.text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>
#include "pgtable.h"

/*
 * Locally defined symbols should be marked hidden:
 */
	.hidden _bss
	.hidden _ebss
	.hidden _got
	.hidden _egot

	__HEAD
	.code32
SYM_FUNC_START(startup_32)
	/*
	 * 32bit entry is 0 and it is ABI so immutable!
	 * If we come here directly from a bootloader,
	 * kernel(text+data+bss+brk), ramdisk, zero_page, command line
	 * all need to be under the 4G limit.
	 */
	cld
	/*
	 * Test the KEEP_SEGMENTS flag to see if the bootloader is asking
	 * us not to reload segments.
	 */
	testb	$KEEP_SEGMENTS, BP_loadflags(%esi)
	jnz	1f

	cli
	movl	$(__BOOT_DS), %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
1:

/*
 * Calculate the delta between where we were compiled to run
 * at and where we were actually loaded at.  This can only be done
 * with a short local call on x86.  Nothing else will tell us what
 * address we are running at.  The reserved chunk of the real-mode
 * data at 0x1e4 (defined as a scratch field) is used as the stack
 * for this calculation.  Only 4 bytes are needed.
 */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp

/* Set up a stack and make sure the CPU supports long mode. */
	movl	$boot_stack_end, %eax
	addl	%ebp, %eax
	movl	%eax, %esp

	call	verify_cpu
	testl	%eax, %eax
	jnz	.Lno_longmode

/*
 * Compute the delta between where we were compiled to run at
 * and where the code will actually run at.
 *
 * %ebp contains the address we are loaded at by the boot loader and %ebx
 * contains the address where we should move the kernel image temporarily
 * for safe in-place decompression.
 */
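/*
 * The CONFIG_RELOCATABLE sequence below rounds the load address up to
 * the required alignment using the usual power-of-two idiom:
 *
 *   %ebx = (%ebp + align - 1) & ~(align - 1)
 *
 * with 'align' taken from the kernel_alignment field of the boot params.
 */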
#ifdef CONFIG_RELOCATABLE
	movl	%ebp, %ebx
	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx
	cmpl	$LOAD_PHYSICAL_ADDR, %ebx
	jge	1f
#endif
	movl	$LOAD_PHYSICAL_ADDR, %ebx
1:

	/* Target address to relocate to for decompression */
	movl	BP_init_size(%esi), %eax
	subl	$_end, %eax
	addl	%eax, %ebx

/*
 * Prepare for entering 64-bit mode
 */

	/* Load new GDT with the 64-bit segments using a 32-bit descriptor */
	addl	%ebp, gdt+2(%ebp)
	lgdt	gdt(%ebp)

	/* Enable PAE mode */
	movl	%cr4, %eax
	orl	$X86_CR4_PAE, %eax
	movl	%eax, %cr4

	/*
	 * Build early 4G boot pagetable
	 */
	/*
	 * If SEV is active then set the encryption mask in the page tables.
	 * This will ensure that when the kernel is copied and decompressed
	 * it will be done so encrypted.
	 */
	call	get_sev_encryption_bit
	xorl	%edx, %edx
	testl	%eax, %eax
	jz	1f
	subl	$32, %eax	/* Encryption bit is always above bit 31 */
	bts	%eax, %edx	/* Set encryption mask for page tables */
1:

	/* Initialize page tables to 0 */
	leal	pgtable(%ebx), %edi
	xorl	%eax, %eax
	movl	$(BOOT_INIT_PGT_SIZE/4), %ecx
	rep	stosl

	/* Build Level 4 */
	leal	pgtable + 0(%ebx), %edi
	leal	0x1007(%edi), %eax	/* next table at +0x1000, flags 0x7: present, writable, user */
	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)

	/* Build Level 3 */
	leal	pgtable + 0x1000(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	$4, %ecx
1:	movl	%eax, 0x00(%edi)
	addl	%edx, 0x04(%edi)
	addl	$0x00001000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b

	/* Build Level 2 */
	leal	pgtable + 0x2000(%ebx), %edi
	movl	$0x00000183, %eax	/* 2M page: present, writable, PS, global */
	movl	$2048, %ecx		/* 2048 entries x 2M = the full 4G */
1:	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)
	addl	$0x00200000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b

	/* Enable the boot page tables */
	leal	pgtable(%ebx), %eax
	movl	%eax, %cr3

	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	/* After gdt is loaded */
	xorl	%eax, %eax
	lldt	%ax
	movl	$__BOOT_TSS, %eax
	ltr	%ax

	/*
	 * Setup for the jump to 64-bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32-bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64-bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can
	 * be used to perform that far jump.
	 */
	pushl	$__KERNEL_CS
	leal	startup_64(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
	movl	efi32_config(%ebp), %ebx
	cmp	$0, %ebx
	jz	1f
	leal	handover_entry(%ebp), %eax
1:
#endif
	pushl	%eax

	/* Enter paged protected mode, activating long mode */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax /* Enable paging and protected mode */
	movl	%eax, %cr0

	/* Jump from 32-bit compatibility mode into 64-bit mode. */
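	/*
	 * The stack now holds, top down, the 32-bit target address
	 * (startup_64, or handover_entry for mixed-mode EFI) and
	 * __KERNEL_CS. lret pops both as a far return; because
	 * __KERNEL_CS has CS.L = 1, execution resumes in 64-bit mode.
	 */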
	lret
SYM_FUNC_END(startup_32)

#ifdef CONFIG_EFI_MIXED
	.org 0x190
SYM_FUNC_START(efi32_stub_entry)
	add	$0x4, %esp		/* Discard return address */
	popl	%ecx
	popl	%edx
	popl	%esi

	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	pop	%ebp
	subl	$1b, %ebp

	movl	%ecx, efi32_config(%ebp)
	movl	%edx, efi32_config+8(%ebp)
	sgdtl	efi32_boot_gdt(%ebp)

	leal	efi32_config(%ebp), %eax
	movl	%eax, efi_config(%ebp)

	jmp	startup_32
SYM_FUNC_END(efi32_stub_entry)
#endif

	.code64
	.org 0x200
SYM_CODE_START(startup_64)
	/*
	 * 64bit entry is 0x200 and it is ABI so immutable!
	 * We come here either from startup_32 or directly from a
	 * 64-bit bootloader.
	 * If we come here from a bootloader, kernel(text+data+bss+brk),
	 * ramdisk, zero_page, command line could be above 4G.
	 * We depend on an identity mapped page table being provided
	 * that maps our entire kernel(text+data+bss+brk), zero page
	 * and command line.
	 */

	/* Set up data segments. */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs

	/*
	 * Compute the decompressed kernel start address.  It is where
	 * we were loaded at, aligned to a 2M boundary. %rbp contains the
	 * decompressed kernel start address.
	 *
	 * If it is a relocatable kernel then decompress and run the kernel
	 * from the load address aligned to a 2MB boundary, otherwise
	 * decompress and run the kernel from LOAD_PHYSICAL_ADDR.
	 *
	 * We cannot rely on the calculation done in 32-bit mode, since we
	 * may have been invoked via the 64-bit entry point.
	 */

	/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
	leaq	startup_32(%rip) /* - $startup_32 */, %rbp
	movl	BP_kernel_alignment(%rsi), %eax
	decl	%eax
	addq	%rax, %rbp
	notq	%rax
	andq	%rax, %rbp
	cmpq	$LOAD_PHYSICAL_ADDR, %rbp
	jge	1f
#endif
	movq	$LOAD_PHYSICAL_ADDR, %rbp
1:

	/* Target address to relocate to for decompression */
	movl	BP_init_size(%rsi), %ebx
	subl	$_end, %ebx
	addq	%rbp, %rbx

	/* Set up the stack */
	leaq	boot_stack_end(%rbx), %rsp

	/*
	 * paging_prepare() and cleanup_trampoline() below can have GOT
	 * references. Adjust the table with the address we are running at.
	 *
	 * Zero RAX for .Ladjust_got: the GOT was not adjusted before;
	 * there's no adjustment to undo.
	 */
	xorq	%rax, %rax

	/*
	 * Calculate the address the binary is loaded at and use it as
	 * a GOT adjustment.
	 */
	call	1f
1:	popq	%rdi
	subq	$1b, %rdi

	call	.Ladjust_got

	/*
	 * At this point we are in long mode with 4-level paging enabled,
	 * but we might want to enable 5-level paging or vice versa.
	 *
	 * The problem is that we cannot do it directly. Setting or clearing
	 * CR4.LA57 in long mode would trigger #GP. So we need to switch off
	 * long mode and paging first.
	 *
	 * We also need a trampoline in lower memory to switch over from
	 * 4- to 5-level paging for cases when the bootloader puts the kernel
	 * above 4G, but didn't enable 5-level paging for us.
	 *
	 * The same trampoline can be used to switch from 5- to 4-level paging
	 * mode, like when starting a 4-level paging kernel via kexec() when
	 * the original kernel worked in 5-level paging mode.
	 *
	 * For the trampoline, we need the top page table to reside in lower
	 * memory as we don't have a way to load 64-bit values into CR3 in
	 * 32-bit mode.
	 *
	 * We go through the trampoline even if we don't have to: if we're
	 * already in the desired paging mode. This way the trampoline code
	 * gets tested on every boot.
	 */
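	/*
	 * In outline, the round trip below is:
	 *
	 *   lretq -> trampoline code (32-bit, in low memory)
	 *         -> lret -> .Lpaging_enabled (64-bit, in trampoline memory)
	 *         -> jmp *%rdi -> trampoline_return
	 *
	 * with paging briefly disabled inside the trampoline so that
	 * CR4.LA57 can be flipped safely.
	 */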
	/* Make sure we have a GDT with a 32-bit code segment */
	leaq	gdt(%rip), %rax
	movq	%rax, gdt64+2(%rip)	/* gdt64+2 is the base field of the descriptor */
	lgdt	gdt64(%rip)

	/*
	 * paging_prepare() sets up the trampoline and checks if we need to
	 * enable 5-level paging.
	 *
	 * paging_prepare() returns a two-quadword structure which lands
	 * into RDX:RAX:
	 *   - Address of the trampoline is returned in RAX.
	 *   - Non-zero RDX means the trampoline needs to enable 5-level
	 *     paging.
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
	 */
	pushq	%rsi
	movq	%rsi, %rdi		/* real mode address */
	call	paging_prepare
	popq	%rsi

	/* Save the trampoline address in RCX */
	movq	%rax, %rcx

	/*
	 * Load the address of trampoline_return() into RDI.
	 * It will be used by the trampoline to return to the main code.
	 */
	leaq	trampoline_return(%rip), %rdi

	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
	pushq	$__KERNEL32_CS
	leaq	TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax
	pushq	%rax
	lretq
trampoline_return:
	/* Restore the stack, the 32-bit trampoline uses its own stack */
	leaq	boot_stack_end(%rbx), %rsp

	/*
	 * cleanup_trampoline() would restore trampoline memory.
	 *
	 * RDI is the address of the page table to use instead of the page
	 * table in trampoline memory (if required).
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
	 */
	pushq	%rsi
	leaq	top_pgtable(%rbx), %rdi
	call	cleanup_trampoline
	popq	%rsi

	/* Zero EFLAGS */
	pushq	$0
	popfq

	/*
	 * Previously we've adjusted the GOT with the address the binary was
	 * loaded at. Now we need to re-adjust for the relocation address.
	 *
	 * Calculate the address the binary is loaded at, so that we can
	 * undo the previous GOT adjustment.
	 */
	call	1f
1:	popq	%rax
	subq	$1b, %rax

	/* The new adjustment is the relocation address */
	movq	%rbx, %rdi
	call	.Ladjust_got

/*
 * Copy the compressed kernel to the end of our buffer
 * where decompression in place becomes safe. The copy runs
 * backwards (std) since the source and destination may overlap.
 */
	pushq	%rsi
	leaq	(_bss-8)(%rip), %rsi
	leaq	(_bss-8)(%rbx), %rdi
	movq	$_bss /* - $startup_32 */, %rcx
	shrq	$3, %rcx
	std
	rep	movsq
	cld
	popq	%rsi

/*
 * Jump to the relocated address.
 */
	leaq	.Lrelocated(%rbx), %rax
	jmp	*%rax
SYM_CODE_END(startup_64)

#ifdef CONFIG_EFI_STUB

/* The entry point for the PE/COFF executable is efi_pe_entry. */
SYM_FUNC_START(efi_pe_entry)
	movq	%rcx, efi64_config(%rip)	/* Handle */
	movq	%rdx, efi64_config+8(%rip)	/* EFI System table pointer */

	leaq	efi64_config(%rip), %rax
	movq	%rax, efi_config(%rip)

	call	1f
1:	popq	%rbp
	subq	$1b, %rbp

	/*
	 * Relocate efi_config->call().
	 */
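	/*
	 * efi64_config is five reserved quadwords followed by the call
	 * pointer (see the data definitions at the end of this file), so
	 * the function pointer to relocate sits at offset 5 * 8 = 40.
	 */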
	addq	%rbp, efi64_config+40(%rip)

	movq	%rax, %rdi
	call	make_boot_params
	cmpq	$0, %rax
	je	fail
	mov	%rax, %rsi
	leaq	startup_32(%rip), %rax
	movl	%eax, BP_code32_start(%rsi)
	jmp	2f		/* Skip the relocation */

handover_entry:
	call	1f
1:	popq	%rbp
	subq	$1b, %rbp

	/*
	 * Relocate efi_config->call().
	 */
	movq	efi_config(%rip), %rax
	addq	%rbp, 40(%rax)
2:
	movq	efi_config(%rip), %rdi
	call	efi_main
	movq	%rax, %rsi
	cmpq	$0, %rax
	jne	2f
fail:
	/* EFI init failed, so hang. */
	hlt
	jmp	fail
2:
	movl	BP_code32_start(%esi), %eax
	leaq	startup_64(%rax), %rax
	jmp	*%rax
SYM_FUNC_END(efi_pe_entry)

	.org 0x390
SYM_FUNC_START(efi64_stub_entry)
	movq	%rdi, efi64_config(%rip)	/* Handle */
	movq	%rsi, efi64_config+8(%rip)	/* EFI System table pointer */

	leaq	efi64_config(%rip), %rax
	movq	%rax, efi_config(%rip)

	movq	%rdx, %rsi
	jmp	handover_entry
SYM_FUNC_END(efi64_stub_entry)
#endif

	.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)

/*
 * Clear BSS (stack is currently empty)
 */
	xorl	%eax, %eax
	leaq	_bss(%rip), %rdi
	leaq	_ebss(%rip), %rcx
	subq	%rdi, %rcx
	shrq	$3, %rcx
	rep	stosq

/*
 * Do the extraction, and jump to the new kernel.
 */
	pushq	%rsi			/* Save the real mode argument */
	movq	%rsi, %rdi		/* real mode address */
	leaq	boot_heap(%rip), %rsi	/* malloc area for decompression */
	leaq	input_data(%rip), %rdx	/* input_data */
	movl	$z_input_len, %ecx	/* input_len */
	movq	%rbp, %r8		/* output target address */
	movq	$z_output_len, %r9	/* decompressed length, end of relocs */
	call	extract_kernel		/* returns kernel location in %rax */
	popq	%rsi

/*
 * Jump to the decompressed kernel.
 */
	jmp	*%rax
SYM_FUNC_END(.Lrelocated)

/*
 * Adjust the global offset table
 *
 * RAX is the previous adjustment of the table to undo (use 0 if it's the
 * first time we touch the GOT).
 * RDI is the new adjustment to apply.
 */
.Ladjust_got:
	/* Walk through the GOT adding the address to the entries */
	leaq	_got(%rip), %rdx
	leaq	_egot(%rip), %rcx
1:
	cmpq	%rcx, %rdx
	jae	2f
	subq	%rax, (%rdx)	/* Undo previous adjustment */
	addq	%rdi, (%rdx)	/* Apply the new adjustment */
	addq	$8, %rdx
	jmp	1b
2:
	ret

	.code32
/*
 * This is the 32-bit trampoline that will be copied over to low memory.
 *
 * RDI contains the return address (might be above 4G).
 * ECX contains the base address of the trampoline memory.
 * Non-zero RDX means the trampoline needs to enable 5-level paging.
 */
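/*
 * The CR3 handling in the trampoline boils down to:
 *
 *   want 5-level (RDX != 0), LA57 set    -> leave CR3 alone
 *   want 5-level (RDX != 0), LA57 clear  -> load trampoline CR3
 *   want 4-level (RDX == 0), LA57 clear  -> leave CR3 alone
 *   want 4-level (RDX == 0), LA57 set    -> load trampoline CR3
 *
 * i.e. CR3 is only replaced when the paging depth actually changes.
 */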
SYM_CODE_START(trampoline_32bit_src)
	/* Set up data and stack segments */
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %ss

	/* Set up new stack */
	leal	TRAMPOLINE_32BIT_STACK_END(%ecx), %esp

	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	/* Check what paging mode we want to be in after the trampoline */
	cmpl	$0, %edx
	jz	1f

	/* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */
	movl	%cr4, %eax
	testl	$X86_CR4_LA57, %eax
	jnz	3f
	jmp	2f
1:
	/* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */
	movl	%cr4, %eax
	testl	$X86_CR4_LA57, %eax
	jz	3f
2:
	/* Point CR3 to the trampoline's new top level page table */
	leal	TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
	movl	%eax, %cr3
3:
	/* Set EFER.LME=1 as a precaution in case the hypervisor pulls the rug */
	pushl	%ecx
	pushl	%edx
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr
	popl	%edx
	popl	%ecx

	/* Enable PAE and LA57 (if required) paging modes */
	movl	$X86_CR4_PAE, %eax
	cmpl	$0, %edx
	jz	1f
	orl	$X86_CR4_LA57, %eax
1:
	movl	%eax, %cr4

	/* Calculate the address of .Lpaging_enabled once we are executing in the trampoline */
	leal	.Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax

	/* Prepare the stack for the far return to long mode */
	pushl	$__KERNEL_CS
	pushl	%eax

	/* Enable paging again */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
	movl	%eax, %cr0

	lret
SYM_CODE_END(trampoline_32bit_src)

	.code64
SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
	/* Return from the trampoline */
	jmp	*%rdi
SYM_FUNC_END(.Lpaging_enabled)

	/*
	 * The trampoline code has a size limit.
	 * Make sure we fail to compile if the trampoline code grows
	 * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
	 */
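	/*
	 * .org can only move the location counter forward, so if the
	 * trampoline ever grows past TRAMPOLINE_32BIT_CODE_SIZE bytes the
	 * directive below would have to move the location counter
	 * backwards, and the assembler rejects that.
	 */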
	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE

	.code32
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
	/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
1:
	hlt
	jmp	1b
SYM_FUNC_END(.Lno_longmode)

#include "../../kernel/verify_cpu.S"

	.data
SYM_DATA_START_LOCAL(gdt64)
	.word	gdt_end - gdt
	.quad	0
SYM_DATA_END(gdt64)
	.balign	8
SYM_DATA_START_LOCAL(gdt)
	.word	gdt_end - gdt
	.long	gdt
	.word	0
	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
	.quad	0x0000000000000000	/* TS continued */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)

#ifdef CONFIG_EFI_STUB
SYM_DATA_LOCAL(efi_config, .quad 0)

#ifdef CONFIG_EFI_MIXED
SYM_DATA_START(efi32_config)
	.fill	5,8,0
	.quad	efi64_thunk
	.byte	0
SYM_DATA_END(efi32_config)
#endif

SYM_DATA_START(efi64_config)
	.fill	5,8,0
	.quad	efi_call
	.byte	1
SYM_DATA_END(efi64_config)
#endif /* CONFIG_EFI_STUB */

/*
 * Stack and heap for decompression
 */
	.bss
	.balign 4
SYM_DATA_LOCAL(boot_heap,	.fill BOOT_HEAP_SIZE, 1, 0)

SYM_DATA_START_LOCAL(boot_stack)
	.fill BOOT_STACK_SIZE, 1, 0
SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)

/*
 * Space for page tables (not in .bss so not zeroed)
 */
	.section ".pgtable","a",@nobits
	.balign 4096
SYM_DATA_LOCAL(pgtable,	.fill BOOT_PGT_SIZE, 1, 0)

/*
 * This page table is used in place of the page table in trampoline
 * memory.
 */
SYM_DATA_LOCAL(top_pgtable,	.fill PAGE_SIZE, 1, 0)