/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
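
	/*
	 * A minimal C sketch of that address fixup, modeled on the
	 * fixup_pointer() helper in head64.c (illustrative only; the
	 * helper is static there and not visible to this file):
	 *
	 *	static void *fixup_pointer(void *ptr, unsigned long physaddr)
	 *	{
	 *		return ptr - (void *)_text + (void *)physaddr;
	 *	}
	 *
	 * physaddr is the physical address _text was actually loaded at,
	 * so the result is usable before the page tables are corrected.
	 */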

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - FRAME_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi

	/*
	 * initial_gs points to initial fixed_percpu_data struct with storage for
	 * the stack protector canary. Global pointer fixups are needed at this
	 * stage, so apply them as is done in fixup_pointer(), and initialize %gs
	 * such that the canary can be accessed at %gs:40 for subsequent C calls.
	 */
	movl	$MSR_GS_BASE, %ecx
	movq	initial_gs(%rip), %rax
	movq	$_text, %rdx
	subq	%rdx, %rax
	addq	%rdi, %rax
	movq	%rax, %rdx
	shrq	$32, %rdx
	wrmsr

	pushq	%rsi
	call	startup_64_setup_env
	popq	%rsi

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
	 * which needs to be done before any CPUID instructions are executed in
	 * subsequent code.
	 */
	movq	%rsi, %rdi
	pushq	%rsi
	call	sme_enable
	popq	%rsi
#endif

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_EMPTY

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
SYM_CODE_END(startup_64)
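
/*
 * Why the canary is reachable at %gs:40 once MSR_GS_BASE points at
 * fixed_percpu_data - a sketch, assuming the layout in <asm/processor.h>:
 *
 *	struct fixed_percpu_data {
 *		char		gs_base[40];
 *		unsigned long	stack_canary;
 *	};
 *
 * gs_base occupies bytes 0..39, so stack_canary lands at offset 40 and a
 * %gs-relative access of the form %gs:40 hits it directly.
 */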

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
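
	/*
	 * %rax now holds the complete CR3 value. A C sketch of what the
	 * two addq instructions above computed (illustrative only):
	 *
	 *	cr3 = (pgt - __START_KERNEL_map)   compile-time offset of the
	 *					   top-level table
	 *	    + phys_base                    runtime physical load offset
	 *	    + sme_mask;                    C-bit, if SME is active
	 *
	 * which is the physical address of early_top_pgt/init_top_pgt with
	 * the encryption mask folded in.
	 */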
	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and waiting
	 * for the next RET instruction.
	 * %rsi carries pointer to realmode data and is callee-clobbered. Save
	 * and restore it.
	 */
	pushq	%rsi
	movq	%rax, %rdi
	call	sev_verify_cbit
	popq	%rsi

	/*
	 * Switch to new page-table
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd, and unmap the
	 * identity mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR // above

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
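
	/*
	 * wrmsr takes the MSR index in %ecx and the 64-bit value split
	 * across %edx:%eax, which is why initial_gs and initial_gs+4 are
	 * loaded separately above. A minimal C sketch of the convention
	 * (illustrative; the kernel's real wrappers live in <asm/msr.h>):
	 *
	 *	static inline void wrmsr64(unsigned int msr, u64 val)
	 *	{
	 *		asm volatile("wrmsr" : : "c" (msr),
	 *			     "a" ((u32)val), "d" ((u32)(val >> 32)));
	 *	}
	 */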

	/*
	 * Setup a boot time stack - Any secondary CPU will have lost its stack
	 * by now because the cr3-switch above unmaps the real-mode stack.
	 */
	movq initial_stack(%rip), %rsp

	/* Setup and Load IDT */
	pushq	%rsi
	call	early_setup_idt
	popq	%rsi

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */
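
	/*
	 * The NX probe above tests CPUID leaf 0x80000001, EDX bit 20. A C
	 * sketch of the same check (illustrative; cpuid_edx() stands in
	 * for whatever CPUID wrapper is at hand):
	 *
	 *	if (cpuid_edx(0x80000001) & (1U << 20))
	 *		efer |= EFER_NX;
	 *
	 * Only the leaf and the bit position matter here.
	 */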

	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * %rsi is a pointer to the real mode structure with interesting
	 * info; pass it to C.
	 */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)
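
/*
 * The tail of .Ljump_to_C_code fakes a 64-bit far jump with a far return.
 * A sketch of the stack at the lretq, lowest address first:
 *
 *	(%rsp)		initial_code	popped into %rip
 *	8(%rsp)		$__KERNEL_CS	popped into %cs
 *	16(%rsp)	$.Lafter_lret	fake return address for the unwinder
 *
 * lretq pops RIP and then CS, setting both in one instruction without
 * needing the 64-bit far-jump-indirect encoding that AMD lacks.
 */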

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except stack. We just set up stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
SYM_CODE_START(start_cpu0)
	UNWIND_HINT_EMPTY
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
SYM_CODE_END(start_cpu0)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

/*
 * The FRAME_SIZE gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS
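
	/*
	 * %rsp now points at a complete struct pt_regs. A sketch of the
	 * frame, lowest address first (assuming the x86-64 layout in
	 * <asm/ptrace.h>):
	 *
	 *	r15 r14 r13 r12 bp bx r11 r10 r9 r8 ax cx dx si di
	 *	orig_ax (the error code)  ip  cs  flags  sp  ss
	 *
	 * The vector number that sat in the di slot has already been moved
	 * into %rsi for the C call below.
	 */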

	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call do_early_exception

	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif
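
/*
 * Both VC handlers strip the error code (addq $8, %rsp) before the iretq
 * because the hardware return frame must be back on top of the stack. A
 * sketch of what a 64-bit iretq consumes, lowest address first:
 *
 *	(%rsp)		RIP
 *	8(%rsp)		CS
 *	16(%rsp)	RFLAGS
 *	24(%rsp)	RSP
 *	32(%rsp)	SS
 */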

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or whether it is enabled.  But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif
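
/*
 * With page table isolation, the user half of each 8k PGD pair sits
 * exactly one page after the kernel half, so the entry code can switch
 * between the two by toggling a single CR3 bit. A sketch, assuming the
 * PTI_USER_PGTABLE_BIT == PAGE_SHIFT convention from ../entry/calling.h:
 *
 *	user_cr3   = kernel_cr3 |  (1UL << PAGE_SHIFT);    set bit 12
 *	kernel_cr3 = user_cr3   & ~(1UL << PAGE_SHIFT);    clear bit 12
 */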

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad	level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)
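
/*
 * A worked expansion of the PMDS() macro used twice above (illustrative):
 * PMDS(0, PERM, 3) emits three 2 MiB entries,
 *
 *	.quad 0 + (0 << PMD_SHIFT) + PERM	maps [0M, 2M)
 *	.quad 0 + (1 << PMD_SHIFT) + PERM	maps [2M, 4M)
 *	.quad 0 + (2 << PMD_SHIFT) + PERM	maps [4M, 6M)
 *
 * With PMD_SHIFT == 21, level2_ident_pgt's PTRS_PER_PMD entries cover
 * 1 GiB and level2_kernel_pgt covers KERNEL_IMAGE_SIZE.
 */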

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)