1b2441318SGreg Kroah-Hartman/* SPDX-License-Identifier: GPL-2.0 */ 29a163ed8SThomas Gleixner/* 39a163ed8SThomas Gleixner * 49a163ed8SThomas Gleixner * Copyright (C) 1991, 1992 Linus Torvalds 59a163ed8SThomas Gleixner * 69a163ed8SThomas Gleixner * Enhanced CPU detection and feature setting code by Mike Jagdis 79a163ed8SThomas Gleixner * and Martin Mares, November 1997. 89a163ed8SThomas Gleixner */ 99a163ed8SThomas Gleixner 109a163ed8SThomas Gleixner.text 119a163ed8SThomas Gleixner#include <linux/threads.h> 128b2f7fffSSam Ravnborg#include <linux/init.h> 139a163ed8SThomas Gleixner#include <linux/linkage.h> 149a163ed8SThomas Gleixner#include <asm/segment.h> 150341c14dSJeremy Fitzhardinge#include <asm/page_types.h> 160341c14dSJeremy Fitzhardinge#include <asm/pgtable_types.h> 179a163ed8SThomas Gleixner#include <asm/cache.h> 189a163ed8SThomas Gleixner#include <asm/thread_info.h> 199a163ed8SThomas Gleixner#include <asm/asm-offsets.h> 209a163ed8SThomas Gleixner#include <asm/setup.h> 21551889a6SIan Campbell#include <asm/processor-flags.h> 228a50e513SH. Peter Anvin#include <asm/msr-index.h> 23cd4d09ecSBorislav Petkov#include <asm/cpufeatures.h> 2460a5317fSTejun Heo#include <asm/percpu.h> 254c5023a3SH. Peter Anvin#include <asm/nops.h> 26fb148d83SAlexander Kuleshov#include <asm/bootparam.h> 27784d5699SAl Viro#include <asm/export.h> 281e620f9bSBoris Ostrovsky#include <asm/pgtable_32.h> 29551889a6SIan Campbell 30551889a6SIan Campbell/* Physical address */ 31551889a6SIan Campbell#define pa(X) ((X) - __PAGE_OFFSET) 329a163ed8SThomas Gleixner 339a163ed8SThomas Gleixner/* 349a163ed8SThomas Gleixner * References to members of the new_cpu_data structure. 
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_STEPPING	new_cpu_data+CPUINFO_x86_stepping
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/* pt_regs occupies 17 32-bit slots on the kernel stack */
#define SIZEOF_PTREGS 17*4

/*
 * Worst-case size of the kernel mapping we need to make:
 * a relocatable kernel can live anywhere in lowmem, so we need to be able
 * to map all of lowmem.
 */
KERNEL_PAGES = LOWMEM_PAGES

/* Reserve brk space for the early page tables sized above */
INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)

/*
 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
__HEAD
ENTRY(startup_32)
	movl pa(initial_stack),%ecx
	
	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
	   us to not reload segments */
	testb $KEEP_SEGMENTS, BP_loadflags(%esi)
	jnz 2f

/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl %eax,%ss
2:
	/* %ecx still holds pa(initial_stack): convert to a physical %esp */
	leal -__PAGE_OFFSET(%ecx),%esp

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With the kexec as boot loader, parameter segment might be loaded beyond
 * kernel image and might not even be addressable by early boot page tables.
 * (kexec on panic case). Hence copy out the parameters before initializing
 * page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f			# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_OLPC
	/* save OFW's pgdir table for later use when calling into OFW */
	movl %cr3, %eax
	movl %eax, pa(olpc_ofw_pgd)
#endif

#ifdef CONFIG_MICROCODE
	/* Early load ucode on BSP. */
	call load_ucode_bsp
#endif

	/* Create early pagetables. */
	call mk_early_pgtbl_32

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
#ifdef CONFIG_X86_PAE
#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
#else
	movl %eax,pa(initial_page_table+0xffc)
#endif

#ifdef CONFIG_PARAVIRT
	/* This can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)
	jb .Ldefault_entry

	/* Paravirt-compatible boot parameters.  Look to see what architecture
	   we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax
	jae .Lbad_subarch

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax	# table holds virtual addresses; we run physical
	jmp *%eax

.Lbad_subarch:
WEAK(xen_entry)
	/* Unknown implementation; there's really
	   nothing we can do at this point. */
	ud2a

	__INITDATA

subarch_entries:
	.long .Ldefault_entry		/* normal x86/PC */
	.long xen_entry			/* Xen hypervisor */
	.long .Ldefault_entry		/* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
	jmp .Ldefault_entry
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except stack. We just set up stack here. Then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movl initial_stack, %ecx
	movl %ecx, %esp
	call *(initial_code)
1:	jmp 1b
ENDPROC(start_cpu0)
#endif

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If cpu hotplug is not supported then this code can go in init section
 * which will be freed later
 */
ENTRY(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl pa(initial_stack),%ecx
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp

#ifdef CONFIG_MICROCODE
	/* Early load ucode on AP. */
	call load_ucode_ap
#endif

/* Boot CPU falls through here too (paging still off, CR0.PG clear below) */
.Ldefault_entry:
	movl $(CR0_STATE & ~X86_CR0_PG),%eax
	movl %eax,%cr0

/*
 * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave
 * bits like NT set. This would confuse the debugger if this code is traced. So
 * initialize them properly now before switching to protected mode. That means
 * DF in particular (even though we have cleared it earlier after copying the
 * command line) because GCC expects it.
 */
	pushl $0
	popfl

/*
 * New page tables may be in 4Mbyte page mode and may be using the global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists
 * if and only if CPUID exists and has flags other than the FPU flag set.
 */
	movl $-1,pa(X86_CPUID)		# preset CPUID level
	movl $X86_EFLAGS_ID,%ecx
	pushl %ecx
	popfl				# set EFLAGS=ID
	pushfl
	popl %eax			# get EFLAGS
	testl $X86_EFLAGS_ID,%eax	# did EFLAGS.ID remain set?
	jz .Lenable_paging		# hw disallowed setting of ID bit
					# which means no CPUID and no CR4

	xorl %eax,%eax
	cpuid
	movl %eax,pa(X86_CPUID)		# save largest std CPUID function

	movl $1,%eax
	cpuid
	andl $~1,%edx			# Ignore CPUID.FPU
	jz .Lenable_paging		# No flags or only CPUID.FPU = no CR4

	movl pa(mmu_cr4_features),%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
	jz .Lenable_paging

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
	ja .Lenable_paging

	/* Clear bogus XD_DISABLE bits */
	call verify_cpu

	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx
	jnc .Lenable_paging

	/* Setup EFER (Extended Feature Enable Register) */
	movl $MSR_EFER, %ecx
	rdmsr

	btsl $_EFER_NX, %eax
	/* Make changes effective */
	wrmsr

.Lenable_paging:

/*
 * Enable paging
 */
	movl $pa(initial_page_table), %eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl $CR0_STATE,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Shift the stack pointer to a virtual address */
	addl $__PAGE_OFFSET, %esp

/*
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	movl setup_once_ref,%eax
	andl %eax,%eax
	jz 1f				# Did we do this already?
	call *%eax
1:

/*
 * Check if it is 486
 */
	movb $4,X86			# at least 486
	cmpl $-1,X86_CPUID
	je .Lis486			# preset -1 means CPUID is unavailable

	/* get vendor info */
	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID		# save CPUID level
	movl %ebx,X86_VENDOR_ID		# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax			# do we have processor info as well?
	je .Lis486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl		# save reg for future use
	andb $0x0f,%ah		# mask processor family
	movb %ah,X86
	andb $0xf0,%al		# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask stepping (revision)
	movb %cl,X86_STEPPING
	movl %edx,X86_CAPABILITY

.Lis486:
	movl $0x50022,%ecx	# set AM, WP, NE and MP
	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0

	lgdt early_gdt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

	movl $(__KERNEL_STACK_CANARY),%eax
	movl %eax,%gs

	xorl %eax,%eax			# Clear LDT
	lldt %ax

	call *(initial_code)
1:	jmp 1b
ENDPROC(startup_32_smp)

#include "verify_cpu.S"

/*
 * setup_once
 *
 * The setup work we only want to run on the BSP.
 *
 * Warning: %esi is live across this function.
 */
__INIT
setup_once:
#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * Configure the stack canary. The linker can't handle this by
	 * relocation.  Manually set base address in stack canary
	 * segment descriptor.
	 */
	movl $gdt_page,%eax
	movl $stack_canary,%ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
#endif

	andl $0,setup_once_ref	/* Once is enough, thanks */
	ret

ENTRY(early_idt_handler_array)
	# 36(%esp) %eflags
	# 32(%esp) %cs
	# 28(%esp) %eip
	# 24(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
	pushl $0		# Dummy error code, to make stack frame uniform
	.endif
	pushl $i		# 20(%esp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl %ss:early_recursion_flag

	/* The vector number is in pt_regs->gs */

	cld			/* NOTE(review): redundant — DF already cleared above */
	pushl	%fs		/* pt_regs->fs (__fsh varies by model) */
	pushl	%es		/* pt_regs->es (__esh varies by model) */
	pushl	%ds		/* pt_regs->ds (__dsh varies by model) */
	pushl	%eax		/* pt_regs->ax */
	pushl	%ebp		/* pt_regs->bp */
	pushl	%edi		/* pt_regs->di */
	pushl	%esi		/* pt_regs->si */
	pushl	%edx		/* pt_regs->dx */
	pushl	%ecx		/* pt_regs->cx */
	pushl	%ebx		/* pt_regs->bx */

	/* Fix up DS and ES */
	movl	$(__KERNEL_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es

	/* Load the vector number into EDX */
	movl	PT_GS(%esp), %edx

	/* Load GS into pt_regs->gs (and maybe clobber __gsh) */
	movw	%gs, PT_GS(%esp)

	movl	%esp, %eax	/* args are pt_regs (EAX), trapnr (EDX) */
	call	early_fixup_exception

	popl	%ebx		/* pt_regs->bx */
	popl	%ecx		/* pt_regs->cx */
	popl	%edx		/* pt_regs->dx */
	popl	%esi		/* pt_regs->si */
	popl	%edi		/* pt_regs->di */
	popl	%ebp		/* pt_regs->bp */
	popl	%eax		/* pt_regs->ax */
	popl	%ds		/* pt_regs->ds (always ignores __dsh) */
	popl	%es		/* pt_regs->es (always ignores __esh) */
	popl	%fs		/* pt_regs->fs (always ignores __fsh) */
	popl	%gs		/* pt_regs->gs (always ignores __gsh) */
	decl	%ss:early_recursion_flag
	addl	$4, %esp	/* pop pt_regs->orig_ax */
	iret
ENDPROC(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */
ENTRY(early_ignore_irq)
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call printk

	call dump_stack

	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret

hlt_loop:
	hlt
	jmp hlt_loop
ENDPROC(early_ignore_irq)

__INITDATA
	.align 4
GLOBAL(early_recursion_flag)
	.long 0

__REFDATA
	.align 4
ENTRY(initial_code)
	.long i386_start_kernel
ENTRY(setup_once_ref)
	.long setup_once

/*
 * BSS section
 */
__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
#ifdef CONFIG_X86_PAE
.globl initial_pg_pmd
initial_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
.globl initial_page_table
initial_page_table:
	.fill 1024,4,0
#endif
initial_pg_fixmap:
	.fill 1024,4,0
.globl empty_zero_page
empty_zero_page:
	.fill 4096,1,0
.globl swapper_pg_dir
swapper_pg_dir:
	.fill 1024,4,0
EXPORT_SYMBOL(empty_zero_page)

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE
ENTRY(initial_page_table)
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
# error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */
#endif

.data
.balign 4
ENTRY(initial_stack)
	/*
	 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
	 * unwinder reliably detect the end of the stack.
	 */
	.long init_thread_union + THREAD_SIZE - SIZEOF_PTREGS - \
	      TOP_OF_KERNEL_STACK_PADDING;

__INITRODATA
int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions. They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size, and 32-bit linear address value:
 */

	.data
.globl boot_gdt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long gdt_page			/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */