/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Enhanced CPU detection and feature setting code by Mike Jagdis
 *  and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/cpufeatures.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include <asm/nospec-branch.h>
#include <asm/bootparam.h>
#include <asm/export.h>
#include <asm/pgtable_32.h>

/* Physical address: paging is off this early, so subtract the kernel base */
#define pa(X) ((X) - __PAGE_OFFSET)

/*
 * References to members of the new_cpu_data structure.
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_STEPPING	new_cpu_data+CPUINFO_x86_stepping
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/* 17 saved dwords: the size of a struct pt_regs on the stack */
#define SIZEOF_PTREGS 17*4

/*
 * Worst-case size of the kernel mapping we need to make:
 * a relocatable kernel can live anywhere in lowmem, so we need to be able
 * to map all of lowmem.
 */
KERNEL_PAGES = LOWMEM_PAGES

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)

/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
__HEAD
SYM_CODE_START(startup_32)
	movl pa(initial_stack),%ecx	# virtual address of the boot stack

/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp	# paging is off: use the physical alias

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx			# byte count -> dword count for stosl
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With the kexec as boot loader, parameter segment might be loaded beyond
 * kernel image and might not even be addressable by early boot page tables.
 * (kexec on panic case). Hence copy out the parameters before initializing
 * page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi			# NULL pointer?
	jz 1f				# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_OLPC
	/* save OFW's pgdir table for later use when calling into OFW */
	movl %cr3, %eax
	movl %eax, pa(olpc_ofw_pgd)
#endif

#ifdef CONFIG_MICROCODE
	/* Early load ucode on BSP. */
	call load_ucode_bsp
#endif

	/* Create early pagetables. */
	call mk_early_pgtbl_32

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
#ifdef CONFIG_X86_PAE
#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
#else
	movl %eax,pa(initial_page_table+0xffc)
#endif

	jmp .Ldefault_entry		# shared tail with the AP entry path below
SYM_CODE_END(startup_32)

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If cpu hotplug is not supported then this code can go in init section
 * which will be freed later
 */
SYM_FUNC_START(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl pa(initial_stack),%ecx
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp	# physical stack until paging is on

#ifdef CONFIG_MICROCODE
	/* Early load ucode on AP. */
	call load_ucode_ap
#endif

.Ldefault_entry:
	/* Known-good CR0 with paging still disabled */
	movl $(CR0_STATE & ~X86_CR0_PG),%eax
	movl %eax,%cr0

/*
 * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave
 * bits like NT set. This would confuse the debugger if this code is traced. So
 * initialize them properly now before switching to protected mode. That means
 * DF in particular (even though we have cleared it earlier after copying the
 * command line) because GCC expects it.
 */
	pushl $0
	popfl

/*
 * New page tables may be in 4Mbyte page mode and may be using the global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists
 * if and only if CPUID exists and has flags other than the FPU flag set.
 */
	movl $-1,pa(X86_CPUID)		# preset CPUID level
	movl $X86_EFLAGS_ID,%ecx
	pushl %ecx
	popfl				# set EFLAGS=ID
	pushfl
	popl %eax			# get EFLAGS
	testl $X86_EFLAGS_ID,%eax	# did EFLAGS.ID remained set?
	jz .Lenable_paging		# hw disallowed setting of ID bit
					# which means no CPUID and no CR4

	xorl %eax,%eax
	cpuid
	movl %eax,pa(X86_CPUID)		# save largest std CPUID function

	movl $1,%eax
	cpuid
	andl $~1,%edx			# Ignore CPUID.FPU
	jz .Lenable_paging		# No flags or only CPUID.FPU = no CR4

	movl pa(mmu_cr4_features),%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
	jz .Lenable_paging

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
	ja .Lenable_paging

	/* Clear bogus XD_DISABLE bits */
	call verify_cpu

	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx
	jnc .Lenable_paging

	/* Setup EFER (Extended Feature Enable Register) */
	movl $MSR_EFER, %ecx
	rdmsr

	btsl $_EFER_NX, %eax
	/* Make changes effective */
	wrmsr

.Lenable_paging:

/*
 * Enable paging
 */
	movl $pa(initial_page_table), %eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl $CR0_STATE,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Shift the stack pointer to a virtual address */
	addl $__PAGE_OFFSET, %esp

/*
 * Check if it is 486
 */
	movb $4,X86		# at least 486
	cmpl $-1,X86_CPUID	# still the preset? -> no CPUID available
	je .Lis486

	/* get vendor info */
	xorl %eax,%eax		# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID		# save CPUID level
	movl %ebx,X86_VENDOR_ID		# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax		# do we have processor info as well?
	je .Lis486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl		# save reg for future use
	andb $0x0f,%ah		# mask processor family
	movb %ah,X86
	andb $0xf0,%al		# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask mask revision
	movb %cl,X86_STEPPING
	movl %edx,X86_CAPABILITY

.Lis486:
	movl $0x50022,%ecx	# set AM, WP, NE and MP
	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0

	lgdt early_gdt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

	xorl %eax,%eax
	movl %eax,%gs			# clear possible garbage in %gs

	xorl %eax,%eax			# Clear LDT
	lldt %ax

	call *(initial_code)
1:	jmp 1b				# initial_code must not return; spin if it does
SYM_FUNC_END(startup_32_smp)

#include "verify_cpu.S"

/*
 * Per-vector early-exception entry stubs: each pushes a uniform
 * (error code, vector) pair and jumps to the common handler.
 */
__INIT
SYM_FUNC_START(early_idt_handler_array)
	# 36(%esp) %eflags
	# 32(%esp) %cs
	# 28(%esp) %eip
	# 24(%esp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
	pushl $0		# Dummy error code, to make stack frame uniform
	.endif
	pushl $i		# 20(%esp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_FUNC_END(early_idt_handler_array)

/*
 * Common tail of the early exception stubs: build a struct pt_regs on
 * the stack and hand it, plus the vector number, to early_fixup_exception().
 */
SYM_CODE_START_LOCAL(early_idt_handler_common)
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl %ss:early_recursion_flag

	/* The vector number is in pt_regs->gs */

	cld
	pushl %fs		/* pt_regs->fs (__fsh varies by model) */
	pushl %es		/* pt_regs->es (__esh varies by model) */
	pushl %ds		/* pt_regs->ds (__dsh varies by model) */
	pushl %eax		/* pt_regs->ax */
	pushl %ebp		/* pt_regs->bp */
	pushl %edi		/* pt_regs->di */
	pushl %esi		/* pt_regs->si */
	pushl %edx		/* pt_regs->dx */
	pushl %ecx		/* pt_regs->cx */
	pushl %ebx		/* pt_regs->bx */

	/* Fix up DS and ES */
	movl $(__KERNEL_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es

	/* Load the vector number into EDX */
	movl PT_GS(%esp), %edx

	/* Load GS into pt_regs->gs (and maybe clobber __gsh) */
	movw %gs, PT_GS(%esp)

	movl %esp, %eax		/* args are pt_regs (EAX), trapnr (EDX) */
	call early_fixup_exception

	/* Restore in reverse push order */
	popl %ebx		/* pt_regs->bx */
	popl %ecx		/* pt_regs->cx */
	popl %edx		/* pt_regs->dx */
	popl %esi		/* pt_regs->si */
	popl %edi		/* pt_regs->di */
	popl %ebp		/* pt_regs->bp */
	popl %eax		/* pt_regs->ax */
	popl %ds		/* pt_regs->ds (always ignores __dsh) */
	popl %es		/* pt_regs->es (always ignores __esh) */
	popl %fs		/* pt_regs->fs (always ignores __fsh) */
	popl %gs		/* pt_regs->gs (always ignores __gsh) */
	decl %ss:early_recursion_flag
	addl $4, %esp		/* pop pt_regs->orig_ax */
	iret
SYM_CODE_END(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */
SYM_FUNC_START(early_ignore_irq)
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag	# give up after nested early faults
	je hlt_loop
	incl early_recursion_flag
	/* re-push saved EIP/CS/EFLAGS/orig value as _printk arguments */
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call _printk

	call dump_stack

	addl $(5*4),%esp	# drop the five _printk arguments
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret

hlt_loop:
	hlt
	jmp hlt_loop
SYM_FUNC_END(early_ignore_irq)

__INITDATA
	.align 4
SYM_DATA(early_recursion_flag, .long 0)

__REFDATA
	.align 4
/* First C entry point, called indirectly at the end of startup_32_smp */
SYM_DATA(initial_code, .long i386_start_kernel)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
#define	PGD_ALIGN	(2 * PAGE_SIZE)
#define PTI_USER_PGD_FILL	1024
#else
#define	PGD_ALIGN	(PAGE_SIZE)
#define PTI_USER_PGD_FILL	0
#endif
/*
 * BSS section
 */
__PAGE_ALIGNED_BSS
	.align PGD_ALIGN
#ifdef CONFIG_X86_PAE
.globl initial_pg_pmd
initial_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
.globl initial_page_table
initial_page_table:
	.fill 1024,4,0
#endif
	.align PGD_ALIGN
initial_pg_fixmap:
	.fill 1024,4,0
.globl swapper_pg_dir
	.align PGD_ALIGN
swapper_pg_dir:
	.fill 1024,4,0
	.fill PTI_USER_PGD_FILL,4,0
.globl empty_zero_page
empty_zero_page:
	.fill 4096,1,0
EXPORT_SYMBOL(empty_zero_page)

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
	/* Page-aligned for the benefit of paravirt? */
	.align PGD_ALIGN
/* Statically-initialized PGD used while paging is first enabled (PAE) */
SYM_DATA_START(initial_page_table)
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
#  error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	/*
	 * PTI needs another page so sync_initial_pagetable() works correctly
	 * and does not scribble over the data which is placed behind the
	 * actual initial_page_table. See clone_pgd_range().
	 */
	.fill 1024, 4, 0
#endif

SYM_DATA_END(initial_page_table)
#endif

.data
.balign 4
/*
 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack,
		.long init_thread_union + THREAD_SIZE -
		SIZEOF_PTREGS - TOP_OF_KERNEL_STACK_PADDING)

__INITRODATA
int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions. They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size, and 32-bit linear address value:
 */

	.data
	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
SYM_DATA_START_LOCAL(boot_gdt_descr)
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET	# physical address: loaded before paging
SYM_DATA_END(boot_gdt_descr)

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
SYM_DATA_START(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long gdt_page			/* Overwritten for secondary CPUs */
SYM_DATA_END(early_gdt_descr)

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
SYM_DATA_START(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
SYM_DATA_END(boot_gdt)