/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/smp.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_END_OF_STACK
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us. These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
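
	/*
	 * Memory references to kernel symbols in this early code use
	 * RIP-relative addressing (sym(%rip)), so the code runs
	 * correctly regardless of the physical address it was actually
	 * loaded at.
	 */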

	/* Set up the stack for verify_cpu() */
	leaq	(__end_init_task - PTREGS_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi

	/* Setup GSBASE to allow stack canary access for C code */
	movl	$MSR_GS_BASE, %ecx
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	pushq	%rsi
	call	startup_64_setup_env
	popq	%rsi

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs
	 * to be done now, since this also includes setup of the SEV-SNP CPUID
	 * table, which needs to be done before any CPUID instructions are
	 * executed in subsequent code.
	 */
	movq	%rsi, %rdi
	pushq	%rsi
	call	sme_enable
	popq	%rsi
#endif

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq

.Lon_kernel_cs:
	UNWIND_HINT_END_OF_STACK

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp	1f
SYM_CODE_END(startup_64)
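
/*
 * The boot CPU joins the common path at the '1:' label below with %rax
 * already holding the early_top_pgt offset plus the SME encryption mask
 * returned by __startup_64(); phys_base is added just before the value
 * is written to %cr3.
 */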
SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	movq	%cr4, %rcx
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif

	/* Enable PAE mode, PGE and LA57 */
	orl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
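
	/*
	 * %rax now holds the final CR3 value: the physical address of
	 * the top level page table plus the SME encryption mask.
	 */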

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 * %rsi carries pointer to realmode data and is callee-clobbered. Save
	 * and restore it.
	 */
	pushq	%rsi
	movq	%rax, %rdi
	call	sev_verify_cbit
	popq	%rsi

	/*
	 * Switch to new page-table
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd and unmap the
	 * identity mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR // above

#ifdef CONFIG_SMP
	/*
	 * For parallel boot, the APIC ID is read from the APIC, and then
	 * used to look up the CPU number. For booting a single CPU, the
	 * CPU number is encoded in smpboot_control.
	 *
	 * Bit 31	STARTUP_READ_APICID (Read APICID from APIC)
	 * Bit 0-23	CPU# if STARTUP_xx flags are not set
	 */
	movl	smpboot_control(%rip), %ecx
	testl	$STARTUP_READ_APICID, %ecx
	jnz	.Lread_apicid
	/*
	 * No control bit set, single CPU bringup. CPU number is provided
	 * in bit 0-23. This is also the boot CPU case (CPU number 0).
	 */
	andl	$(~STARTUP_PARALLEL_MASK), %ecx
	jmp	.Lsetup_cpu

.Lread_apicid:
	/* Check whether X2APIC mode is already enabled */
	mov	$MSR_IA32_APICBASE, %ecx
	rdmsr
	testl	$X2APIC_ENABLE, %eax
	jnz	.Lread_apicid_msr
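
	/*
	 * In xAPIC mode the APIC ID sits in bits 31:24 of the
	 * memory-mapped APIC_ID register, hence the shift by 24 below.
	 * In x2APIC mode the full 32-bit ID is read from an MSR instead.
	 */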
	/* Read the APIC ID from the fix-mapped MMIO space. */
	movq	apic_mmio_base(%rip), %rcx
	addq	$APIC_ID, %rcx
	movl	(%rcx), %eax
	shr	$24, %eax
	jmp	.Llookup_AP

.Lread_apicid_msr:
	mov	$APIC_X2APIC_ID_MSR, %ecx
	rdmsr

.Llookup_AP:
	/* EAX contains the APIC ID of the current CPU */
	xorq	%rcx, %rcx
	leaq	cpuid_to_apicid(%rip), %rbx

.Lfind_cpunr:
	cmpl	(%rbx,%rcx,4), %eax
	jz	.Lsetup_cpu
	inc	%ecx
#ifdef CONFIG_FORCE_NR_CPUS
	cmpl	$NR_CPUS, %ecx
#else
	cmpl	nr_cpu_ids(%rip), %ecx
#endif
	jb	.Lfind_cpunr

	/* APIC ID not found in the table. Drop the trampoline lock and bail. */
	movq	trampoline_lock(%rip), %rax
	movl	$0, (%rax)

1:	cli
	hlt
	jmp	1b

.Lsetup_cpu:
	/* Get the per cpu offset for the given CPU# which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */

	/*
	 * Setup a boot time stack - Any secondary CPU will have lost its stack
	 * by now because the cr3-switch above unmaps the real-mode stack.
	 *
	 * RDX contains the per-cpu offset
	 */
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp

	/*
	 * Now that this CPU is running on its own stack, drop the realmode
	 * protection. For the boot CPU the pointer is NULL!
	 */
	movq	trampoline_lock(%rip), %rax
	testq	%rax, %rax
	jz	.Lsetup_gdt
	movl	$0, (%rax)
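
	/*
	 * The zero store above releases the trampoline lock, which
	 * serializes APs through the shared real-mode trampoline stack,
	 * allowing the next AP to be started.
	 */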

.Lsetup_gdt:
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp
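
	/*
	 * The 10-byte lgdt operand built on the stack above consists of
	 * a 2-byte limit followed by an 8-byte linear base address;
	 * reserving 16 bytes rather than 10 keeps %rsp 16-byte aligned.
	 */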

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot cpu uses init data section until
	 * the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
#ifndef CONFIG_SMP
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
#endif
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	/* Setup and Load IDT */
	pushq	%rsi
	call	early_setup_idt
	popq	%rsi

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl	%eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * %rsi is a pointer to the real mode structure with interesting
	 * info; pass it to C.
	 */
	movq	%rsi, %rdi
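
	/*
	 * %rdi is the first integer argument register in the x86-64
	 * System V calling convention, so real_mode_data arrives as the
	 * first parameter of the C entry point.
	 */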

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump. In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
/*
 * Entry point for soft restart of a CPU. Invoked from xxx_play_dead() for
 * restarting the boot CPU or for restarting SEV guest CPUs after CPU hot
 * unplug. Everything is set up already except the stack.
 */
SYM_CODE_START(soft_restart_cpu)
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(soft_restart_cpu)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif
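
/*
 * __REFDATA places the variables below in the .ref.data section, so
 * that references to them from __init code do not trigger section
 * mismatch warnings.
 */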
	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

SYM_DATA(trampoline_lock, .quad 0);
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl	early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq	%rsi				/* pt_regs->si */
	movq	8(%rsp), %rsi			/* RSI = vector number */
	movq	%rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	%rax				/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->bx */
	pushq	%rbp				/* pt_regs->bp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS
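
	/*
	 * Together with the error code in orig_ax and the hardware iret
	 * frame, the pushes above complete a struct pt_regs on the
	 * stack.
	 */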
	movq	%rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call	do_early_exception

	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned. We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
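
/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 3) emits three
 * .quad entries mapping physical 0, 2M and 4M with 2M (PSE) pages,
 * since PMD_SHIFT is 21.
 */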

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or it is enabled. But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)
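
/*
 * The _NOENC page table flag variants omit the SME C-bit; when memory
 * encryption is active, the encryption mask is ORed into these entries
 * by the early boot fixup code.
 */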

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad	level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(smpboot_control,	.long 0)

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)