/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Enhanced CPU detection and feature setting code by Mike Jagdis
 *  and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/cpufeatures.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include <asm/nospec-branch.h>
#include <asm/bootparam.h>
#include <asm/export.h>
#include <asm/pgtable_32.h>

/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)

/*
 * References to members of the new_cpu_data structure.
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_STEPPING	new_cpu_data+CPUINFO_x86_stepping
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id


#define SIZEOF_PTREGS	17*4

/*
 * Worst-case size of the kernel mapping we need to make:
 * a relocatable kernel can live anywhere in lowmem, so we need to be able
 * to map all of lowmem.
 */
KERNEL_PAGES = LOWMEM_PAGES

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)

/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
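/*
 * Note on addressing in the code below: until paging is enabled at
 * .Lenable_paging, this code runs at its physical load address, so data
 * references in startup_32 go through the pa() macro defined above.
 * As an illustration, with the default 3G/1G split
 * (__PAGE_OFFSET == 0xC0000000), pa(0xC1000000) == 0x01000000.
 */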
__HEAD
SYM_CODE_START(startup_32)
	movl pa(initial_stack),%ecx

/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With kexec as the boot loader, the parameter segment might be loaded
 * beyond the kernel image and might not even be addressable by the early
 * boot page tables (kexec on panic case). Hence copy out the parameters
 * before initializing the page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f			# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_OLPC
	/* save OFW's pgdir table for later use when calling into OFW */
	movl %cr3, %eax
	movl %eax, pa(olpc_ofw_pgd)
#endif

	/* Create early pagetables. */
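	/*
	 * mk_early_pgtbl_32() is a C helper (see head32.c); broadly, it is
	 * expected to populate the brk-reserved pagetable area so that the
	 * kernel image is reachable both through an identity mapping and at
	 * __PAGE_OFFSET once paging is switched on below.
	 */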
	call mk_early_pgtbl_32

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
#ifdef CONFIG_X86_PAE
#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
#else
	movl %eax,pa(initial_page_table+0xffc)
#endif

	jmp .Ldefault_entry
SYM_CODE_END(startup_32)

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If cpu hotplug is not supported then this code can go in the init
 * section, which will be freed later.
 */
SYM_FUNC_START(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl pa(initial_stack),%ecx
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp

.Ldefault_entry:
	movl $(CR0_STATE & ~X86_CR0_PG),%eax
	movl %eax,%cr0

/*
 * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave
 * bits like NT set. This would confuse the debugger if this code is traced. So
 * initialize them properly now before switching to protected mode. That means
 * DF in particular (even though we have cleared it earlier after copying the
 * command line) because GCC expects it.
 */
	pushl $0
	popfl

/*
 * New page tables may be in 4Mbyte page mode and may be using global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists
 * if and only if CPUID exists and has flags other than the FPU flag set.
 */
	movl $-1,pa(X86_CPUID)		# preset CPUID level
	movl $X86_EFLAGS_ID,%ecx
	pushl %ecx
	popfl				# set EFLAGS=ID
	pushfl
	popl %eax			# get EFLAGS
	testl $X86_EFLAGS_ID,%eax	# did EFLAGS.ID remain set?
	jz .Lenable_paging		# hw disallowed setting of ID bit
					# which means no CPUID and no CR4

	xorl %eax,%eax
	cpuid
	movl %eax,pa(X86_CPUID)		# save largest std CPUID function

	movl $1,%eax
	cpuid
	andl $~1,%edx			# Ignore CPUID.FPU
	jz .Lenable_paging		# No flags or only CPUID.FPU = no CR4

	movl pa(mmu_cr4_features),%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
	jz .Lenable_paging

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
	ja .Lenable_paging

	/* Clear bogus XD_DISABLE bits */
	call verify_cpu

	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx
	jnc .Lenable_paging

	/* Setup EFER (Extended Feature Enable Register) */
	movl $MSR_EFER, %ecx
	rdmsr

	btsl $_EFER_NX, %eax
	/* Make changes effective */
	wrmsr

.Lenable_paging:

/*
 * Enable paging
 */
	movl $pa(initial_page_table), %eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl $CR0_STATE,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Shift the stack pointer to a virtual address */
	addl $__PAGE_OFFSET, %esp
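	/*
	 * %esp was loaded with the physical alias of initial_stack before
	 * paging was on (leal -__PAGE_OFFSET(%ecx),%esp above); adding
	 * __PAGE_OFFSET back restores the linked virtual address. With the
	 * default 3G/1G split, an illustrative physical value of 0x02000000
	 * becomes 0xC2000000 again here.
	 */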

/*
 * Check if it is 486
 */
	movb $4,X86			# at least 486
	cmpl $-1,X86_CPUID
	je .Lis486

	/* get vendor info */
	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID		# save CPUID level
	movl %ebx,X86_VENDOR_ID		# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax			# do we have processor info as well?
	je .Lis486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl		# save reg for future use
	andb $0x0f,%ah		# mask processor family
	movb %ah,X86
	andb $0xf0,%al		# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask revision
	movb %cl,X86_STEPPING
	movl %edx,X86_CAPABILITY

.Lis486:
	movl $0x50022,%ecx	# set AM, WP, NE and MP
	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0

	lgdt early_gdt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

	xorl %eax,%eax
	movl %eax,%gs			# clear possible garbage in %gs

	xorl %eax,%eax			# Clear LDT
	lldt %ax

	call *(initial_code)
1:	jmp 1b
SYM_FUNC_END(startup_32_smp)

#include "verify_cpu.S"

__INIT
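/*
 * Each of the NUM_EXCEPTION_VECTORS stubs emitted by the .rept below is
 * EARLY_IDT_HANDLER_SIZE bytes long and, roughly, expands to
 *
 *	pushl $0			# only if the vector has no error code
 *	pushl $<vector number>
 *	jmp early_idt_handler_common
 *
 * padded out with 0xcc (int3) bytes.
 */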
SYM_FUNC_START(early_idt_handler_array)
	# 36(%esp) %eflags
	# 32(%esp) %cs
	# 28(%esp) %eip
	# 24(%esp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
	pushl $0		# Dummy error code, to make stack frame uniform
	.endif
	pushl $i		# 20(%esp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_FUNC_END(early_idt_handler_array)

SYM_CODE_START_LOCAL(early_idt_handler_common)
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl %ss:early_recursion_flag

	/* The vector number is in pt_regs->gs */

	cld
	pushl	%fs		/* pt_regs->fs (__fsh varies by model) */
	pushl	%es		/* pt_regs->es (__esh varies by model) */
	pushl	%ds		/* pt_regs->ds (__dsh varies by model) */
	pushl	%eax		/* pt_regs->ax */
	pushl	%ebp		/* pt_regs->bp */
	pushl	%edi		/* pt_regs->di */
	pushl	%esi		/* pt_regs->si */
	pushl	%edx		/* pt_regs->dx */
	pushl	%ecx		/* pt_regs->cx */
	pushl	%ebx		/* pt_regs->bx */

	/* Fix up DS and ES */
	movl	$(__KERNEL_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es

	/* Load the vector number into EDX */
	movl	PT_GS(%esp), %edx

	/* Load GS into pt_regs->gs (and maybe clobber __gsh) */
	movw	%gs, PT_GS(%esp)

	movl	%esp, %eax	/* args are pt_regs (EAX), trapnr (EDX) */
	call	early_fixup_exception

	popl	%ebx		/* pt_regs->bx */
	popl	%ecx		/* pt_regs->cx */
	popl	%edx		/* pt_regs->dx */
	popl	%esi		/* pt_regs->si */
	popl	%edi		/* pt_regs->di */
	popl	%ebp		/* pt_regs->bp */
	popl	%eax		/* pt_regs->ax */
	popl	%ds		/* pt_regs->ds (always ignores __dsh) */
	popl	%es		/* pt_regs->es (always ignores __esh) */
	popl	%fs		/* pt_regs->fs (always ignores __fsh) */
	popl	%gs		/* pt_regs->gs (always ignores __gsh) */
	decl	%ss:early_recursion_flag
	addl	$4, %esp	/* pop pt_regs->orig_ax */
	iret
SYM_CODE_END(early_idt_handler_common)
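/*
 * early_fixup_exception() above is a C function taking the pt_regs pointer
 * and the vector number; since the 32-bit kernel is built with -mregparm=3,
 * those two arguments are passed in %eax and %edx, which is why the handler
 * loads them there instead of pushing them on the stack.
 */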

/* This is the default interrupt "handler" :-) */
SYM_FUNC_START(early_ignore_irq)
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call _printk

	call dump_stack

	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret

hlt_loop:
	hlt
	jmp hlt_loop
SYM_FUNC_END(early_ignore_irq)

__INITDATA
	.align 4
SYM_DATA(early_recursion_flag, .long 0)

__REFDATA
	.align 4
SYM_DATA(initial_code, .long i386_start_kernel)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
#define PGD_ALIGN	(2 * PAGE_SIZE)
#define PTI_USER_PGD_FILL	1024
#else
#define PGD_ALIGN	(PAGE_SIZE)
#define PTI_USER_PGD_FILL	0
#endif
/*
 * BSS section
 */
__PAGE_ALIGNED_BSS
	.align PGD_ALIGN
#ifdef CONFIG_X86_PAE
.globl initial_pg_pmd
initial_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
.globl initial_page_table
initial_page_table:
	.fill 1024,4,0
#endif
	.align PGD_ALIGN
initial_pg_fixmap:
	.fill 1024,4,0
.globl swapper_pg_dir
	.align PGD_ALIGN
swapper_pg_dir:
	.fill 1024,4,0
	.fill PTI_USER_PGD_FILL,4,0
.globl empty_zero_page
empty_zero_page:
	.fill 4096,1,0
EXPORT_SYMBOL(empty_zero_page)

/*
 * This starts the data section.
 */
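/*
 * With CONFIG_X86_PAE, KPMDS (defined above as (((-__PAGE_OFFSET) >> 30) & 3))
 * is the number of kernel PMDs needed for the chosen split: the default
 * 3G/1G split (__PAGE_OFFSET == 0xC0000000) gives KPMDS == 1, a 2G/2G split
 * gives 2 and a 1G/3G split gives 3, which is why initial_page_table below
 * spells out exactly those three layouts.
 */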
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
	/* Page-aligned for the benefit of paravirt? */
	.align PGD_ALIGN
SYM_DATA_START(initial_page_table)
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
# error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	/*
	 * PTI needs another page so sync_initial_pagetable() works correctly
	 * and does not scribble over the data which is placed behind the
	 * actual initial_page_table. See clone_pgd_range().
	 */
	.fill 1024, 4, 0
#endif

SYM_DATA_END(initial_page_table)
#endif

.data
.balign 4
/*
 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack,
	 .long init_thread_union + THREAD_SIZE -
	 SIZEOF_PTREGS - TOP_OF_KERNEL_STACK_PADDING)

__INITRODATA
int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions. They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size, and 32-bit linear address value:
 */

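/*
 * For instance, boot_gdt_descr below is exactly such an object: a 16-bit
 * limit of __BOOT_DS+7 (the offset of the last byte of the boot data
 * segment entry) followed by the 32-bit address boot_gdt - __PAGE_OFFSET,
 * i.e. the physical address of the table, since it is loaded with lgdt
 * before paging is enabled.
 */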
	.data
	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
SYM_DATA_START_LOCAL(boot_gdt_descr)
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET
SYM_DATA_END(boot_gdt_descr)

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
SYM_DATA_START(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long gdt_page			/* Overwritten for secondary CPUs */
SYM_DATA_END(early_gdt_descr)

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
SYM_DATA_START(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
SYM_DATA_END(boot_gdt)