/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
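
/*
 * A worked example (a sketch, assuming the default 4-level layout where
 * __PAGE_OFFSET_BASE_L4 is 0xffff888000000000 and __START_KERNEL_map is
 * 0xffffffff80000000):
 *
 *	L4_PAGE_OFFSET  = l4_index(0xffff888000000000)  = 273
 *	L4_START_KERNEL = l4_index(0xffffffff80000000)  = 511
 *	L3_START_KERNEL = pud_index(0xffffffff80000000) = 510
 *
 * i.e. the direct map and the kernel image live in the last slots of the
 * top-level and third-level tables, matching the 511/510 comments in the
 * static tables below.
 */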

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode, with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Perform pagetable fixups.  Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode, with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:
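
	/*
	 * Both boot paths converge here, with %rax holding the prospective
	 * CR3 value (the SME encryption mask, if any, already folded in);
	 * phys_base still has to be added below before it is written to
	 * %cr3.
	 */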

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because the kernel will soon no longer have access to the
	 * userspace addresses we are currently running at.  We have to do
	 * that here because in 32bit we couldn't load a 64bit linear
	 * address.
	 */
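	/*
	 * early_gdt_descr is defined near the bottom of this file; its base
	 * points at the boot CPU's copy of gdt_page in the init per-cpu
	 * area (see INIT_PER_CPU_VAR below).
	 */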
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section until per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* %rsi is a pointer to the real mode structure with interesting
	   info; pass it to C */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set, so we
	 * make this a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
END(secondary_startup_64)
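
/*
 * Both entry points above call verify_cpu to sanity-check the CPU's
 * feature set before going any further; the routine itself is pulled in
 * below.
 */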
#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip), %rsp
	UNWIND_HINT_EMPTY
	jmp	.Ljump_to_C_code
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	/*
	 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
	 * unwinder reliably detect the end of the stack.
	 */
	.quad	init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
	__FINITDATA

	__INIT
ENTRY(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
	UNWIND_HINT_IRET_REGS offset=16
END(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
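	/*
	 * Clear the direction flag before doing anything else: the C code
	 * called below expects DF = 0 on entry, per the ABI.
	 */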
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
END(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)
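
/*
 * Usage note: NEXT_PAGE(name) simply page-aligns and emits a global label,
 * so e.g. NEXT_PAGE(early_dynamic_pgts) below starts a fresh
 * PAGE_SIZE-aligned table named early_dynamic_pgts.
 */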

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define NEXT_PGD_PAGE(name) \
	.balign 2 * PAGE_SIZE; \
GLOBAL(name)
#else
#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

	__INITDATA
NEXT_PGD_PAGE(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
NEXT_PGD_PAGE(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or it is enabled.  But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#else
NEXT_PGD_PAGE(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
#endif

#ifdef CONFIG_X86_5LEVEL
NEXT_PAGE(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#endif
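
/*
 * The tables below wire up the kernel half of the address space: the
 * top-level entry at index L4_START_KERNEL (511) points at
 * level3_kernel_pgt, whose entry 510 maps the kernel image through
 * level2_kernel_pgt and whose entry 511 maps the fixmap area through
 * level2_fixmap_pgt.
 */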
NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 *
	 *  This table is eventually used by the kernel during normal
	 *  runtime.  Care must be taken to clear out undesired bits
	 *  later, like _PAGE_RW or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)