/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define INTERRUPT_RETURN iretq
#endif

/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 *
 */

#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

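/*
 * Illustrative values: __START_KERNEL_map is -2G, so L4_START_KERNEL = 511
 * and L3_START_KERNEL = 510, i.e. the kernel image always sits in the last
 * PGD slot and the second-to-last PUD slot (see the "= 511" and "= 510"
 * arithmetic next to init_top_pgt and level3_kernel_pgt below).
 */
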
	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

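	/*
	 * %rax now holds (page table symbol - __START_KERNEL_map) plus the
	 * CR3 modifier.  phys_base, fixed up by __startup_64(), is the delta
	 * between the kernel's link-time and actual physical load address,
	 * so adding it below turns that offset into the physical address
	 * that gets programmed into CR3.
	 */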
	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel will no longer have access to the userspace
	 * addresses we are currently running on.  We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data.  If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-CPU areas are set up.
	 */
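	/*
	 * wrmsr writes the MSR selected by %ecx with the 64-bit value in
	 * %edx:%eax, which is why initial_gs is loaded as two 32-bit halves.
	 */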
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* %rsi is a pointer to the real mode structure with interesting info;
	   pass it to C */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address. Since we are
	 * running in identity-mapped space we have to jump to the full 64bit
	 * address; this is only possible with an indirect jump.  In addition
	 * we need to ensure %cs is set, so we make this a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
END(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except the stack. We just set up the stack here, then call
 * start_secondary() via .Ljump_to_C_code.
 */
ENTRY(start_cpu0)
	UNWIND_HINT_EMPTY
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
END(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))

/*
 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS)
	__FINITDATA

	__INIT
ENTRY(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
	UNWIND_HINT_IRET_REGS offset=16
END(early_idt_handler_array)

SYM_CODE_START_LOCAL(early_idt_handler_common)
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

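	/*
	 * With a full pt_regs frame now on the stack, a page fault (vector 14)
	 * is special-cased: early_make_pgtable() first tries to build the
	 * missing early mapping for the address in CR2, and only if that
	 * fails do we fall through to early_fixup_exception().
	 */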
	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* can clobber %rax if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)


#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
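/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2) expands to two
 * consecutive 2MB large-page PMD entries:
 *	.quad (0) + (0 << PMD_SHIFT) + (__PAGE_KERNEL_IDENT_LARGE_EXEC)
 *	.quad (0) + (1 << PMD_SHIFT) + (__PAGE_KERNEL_IDENT_LARGE_EXEC)
 * i.e. mappings of the first two 2MB physical ranges.
 */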

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or whether it is enabled.  But
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 *
	 *  This table is eventually used by the kernel during normal
	 *  runtime.  Care must be taken to clear out undesired bits
	 *  later, like _PAGE_RW or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)
