/*
 *  linux/arch/i386/kernel/head.S -- the 32-bit startup code.
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Enhanced CPU detection and feature setting code by Mike Jagdis
 *  and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>

/*
 * References to members of the new_cpu_data structure.
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/*
 * This is how much memory *in addition to the memory covered up to
 * and including _end* we need mapped initially.
 * We need:
 *  - one bit for each possible page, but only in low memory, which means
 *    2^32/4096/8 = 128K worst case (4G/4G split.)
 *  - enough space to map all low memory, which means
 *    (2^32/4096) / 1024 pages (worst case, non PAE)
 *    (2^32/4096) / 512 + 4 pages (worst case for PAE)
 *  - a few pages for allocator use before the kernel pagetable has
 *    been set up
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
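 *
 * As a rough worked example, the non-PAE 4G/4G worst case comes to:
 *   boot bitmap            2^32/4096/8      = 128 KB
 *   low-memory page tables (2^32/4096)/1024 = 1024 pages = 4 MB
 *   allocator slop         4 pages          = 16 KB
 * i.e. a little over 4 MB mapped beyond _end, which matches the rule
 * of thumb above of roughly one kilobyte per megabyte of low memory.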
 */
LOW_PAGES = 1<<(32-PAGE_SHIFT_asm)

/*
 * To preserve the DMA pool in PAGEALLOC kernels, we'll allocate
 * pagetables from above the 16MB DMA limit, so we'll have to set
 * up pagetables 16MB more (worst-case):
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
LOW_PAGES = LOW_PAGES + 0x1000000
#endif

#if PTRS_PER_PMD > 1
PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD
#else
PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PGD)
#endif
BOOTBITMAP_SIZE = LOW_PAGES / 8
ALLOCATOR_SLOP = 4

INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_SIZE_asm

/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
.section .text.head,"ax",@progbits
ENTRY(startup_32)
	/* check to see if the KEEP_SEGMENTS flag is meaningful */
	cmpw $0x207, BP_version(%esi)
	jb 1f

	/* test the KEEP_SEGMENTS flag to see if the bootloader is asking
	   us not to reload segments */
	testb $(1<<6), BP_loadflags(%esi)
	jnz 2f

/*
 * Set segments to known values.
 */
1:	lgdt boot_gdt_descr - __PAGE_OFFSET
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
2:

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $__bss_start - __PAGE_OFFSET,%edi
	movl $__bss_stop - __PAGE_OFFSET,%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With kexec as the boot loader, the parameter segment might be loaded
 * beyond the kernel image and might not even be addressable by the
 * early boot page tables (the kexec-on-panic case).  Hence copy out
 * the parameters before initializing the page tables.
 */
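/*
 * boot_params is the PARAM_SIZE-byte real-mode data block (the "zero
 * page").  The command line itself is not part of that block; the
 * setup header only carries a pointer to it (cmd_line_ptr, read below
 * via NEW_CL_POINTER), hence the separate copy.
 */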
	movl $(boot_params - __PAGE_OFFSET),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f			# No command line
	movl $(boot_command_line - __PAGE_OFFSET),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_PARAVIRT
	cmpw $0x207, (boot_params + BP_version - __PAGE_OFFSET)
	jb default_entry

	/* Paravirt-compatible boot parameters.  Look to see what
	   architecture we're booting under. */
	movl (boot_params + BP_hardware_subarch - __PAGE_OFFSET), %eax
	cmpl $num_subarch_entries, %eax
	jae bad_subarch

	movl subarch_entries - __PAGE_OFFSET(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax
	jmp *%eax

bad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
	/* Unknown implementation; there's really
	   nothing we can do at this point. */
	ud2a
.data
subarch_entries:
	.long default_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#endif /* CONFIG_PARAVIRT */

/*
 * Initialize page tables.  This creates a PDE and a set of page
 * tables, which are located immediately beyond _end.  The variable
 * init_pg_tables_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END.
 *
 * Warning: don't use %esi or the stack in this code.  However, %esp
 * can be used as a GPR if you really need it...
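 *
 * A sketch of what one iteration of the loop below builds: each page
 * table entry is "frame | 0x007" (Present+RW+User), and the PDE entry
 * is the physical address of that page table with the same low bits.
 * The kernel PDE slot is found via page_pde_offset = __PAGE_OFFSET >> 20,
 * because each 4-byte PDE maps 4 MB: (__PAGE_OFFSET >> 22) * 4.  With
 * the default __PAGE_OFFSET of 0xC0000000 that is byte offset 0xC00,
 * i.e. PDE 768.  The early fixmap setup further below hooks
 * swapper_pg_pmd into PDE 1023 (byte offset 4092), covering the top
 * 4 MB of the address space.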
 */
page_pde_offset = (__PAGE_OFFSET >> 20);

default_entry:
	movl $(pg0 - __PAGE_OFFSET), %edi
	movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
	movl $0x007, %eax			/* 0x007 = PRESENT+RW+USER */
10:
	leal 0x007(%edi),%ecx			/* Create PDE entry */
	movl %ecx,(%edx)			/* Store identity PDE entry */
	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
	addl $4,%edx
	movl $1024, %ecx
11:
	stosl
	addl $0x1000,%eax
	loop 11b
	/* End condition: we must map up to and including INIT_MAP_BEYOND_END */
	/* bytes beyond the end of our own page tables; the +0x007 is the attribute bits */
	leal (INIT_MAP_BEYOND_END+0x007)(%edi),%ebp
	cmpl %ebp,%eax
	jb 10b
	movl %edi,(init_pg_tables_end - __PAGE_OFFSET)

	/* Do an early initialization of the fixmap area */
	movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
	movl $(swapper_pg_pmd - __PAGE_OFFSET), %eax
	addl $0x67, %eax			/* 0x67 == _PAGE_TABLE */
	movl %eax, 4092(%edx)

	xorl %ebx,%ebx				/* This is the boot CPU (BSP) */
	jmp 3f
/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If CPU hotplug is not supported then this code can go in the init
 * section, which will be freed later.
 */

#ifndef CONFIG_HOTPLUG_CPU
.section .init.text,"ax",@progbits
#endif

#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs

/*
 * New page tables may be in 4Mbyte page mode and may
 * be using the global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all!
 * So we do not try to touch it unless we really have
 * some bits in it to set.  This won't work if the BSP
 * implements cr4 but this AP does not -- very unlikely
 * but be warned!  The same applies to the pse feature
 * if not equally supported. --macro
 *
 * NOTE! We have to correct for the fact that we're
 * not yet offset PAGE_OFFSET..
 */
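/*
 * For reference on the magic numbers below: CR4.PAE is bit 5, CPUID
 * leaf 0x80000001 reports the NX/Execute-Disable capability in EDX
 * bit 20, EFER is MSR 0xc0000080, and its NXE enable is bit 11.
 */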
#define cr4_bits mmu_cr4_features-__PAGE_OFFSET
	movl cr4_bits,%edx
	andl %edx,%edx
	jz 6f
	movl %cr4,%eax		# Turn on paging options (PSE,PAE,..)
	orl %edx,%eax
	movl %eax,%cr4

	btl $5, %eax		# check if PAE is enabled
	jnc 6f

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	cmpl $0x80000000, %eax
	jbe 6f
	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $20, %edx
	jnc 6f

	/* Setup EFER (Extended Feature Enable Register) */
	movl $0xc0000080, %ecx
	rdmsr

	btsl $11, %eax
	/* Make changes effective */
	wrmsr

6:
	/* This is a secondary processor (AP) */
	xorl %ebx,%ebx
	incl %ebx

#endif /* CONFIG_SMP */
3:

/*
 * Enable paging
 */
	movl $swapper_pg_dir-__PAGE_OFFSET,%eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl %cr0,%eax
	orl $0x80000000,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Set up the stack pointer */
	lss stack_start,%esp

/*
 * Initialize eflags.  Some BIOSes leave bits like NT set.  This would
 * confuse the debugger if this code is traced.
 * XXX - best to initialize before switching to protected mode.
 */
	pushl $0
	popfl

#ifdef CONFIG_SMP
	andl %ebx,%ebx
	jz 1f		/* Initial CPU cleans BSS */
	jmp checkCPUtype
1:
#endif /* CONFIG_SMP */

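/*
 * On SMP, the APs jumped to checkCPUtype above (%ebx is 0 on the boot
 * CPU and 1 on an AP), so only the BSP falls through to setup_idt:
 * the shared IDT only needs to be built once.
 */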
/*
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	call setup_idt

checkCPUtype:

	movl $-1,X86_CPUID		#  -1 for no CPUID initially

/* check if it is 486 or 386. */
/*
 * XXX - this does a lot of unnecessary setup.  Alignment checks don't
 * apply at our cpl of 0 and the stack ought to be aligned already, and
 * we don't need to preserve eflags.
 */

	movb $3,X86		# at least 386
	pushfl			# push EFLAGS
	popl %eax		# get EFLAGS
	movl %eax,%ecx		# save original EFLAGS
	xorl $0x240000,%eax	# flip AC and ID bits in EFLAGS
	pushl %eax		# copy to EFLAGS
	popfl			# set EFLAGS
	pushfl			# get new EFLAGS
	popl %eax		# put it in eax
	xorl %ecx,%eax		# change in flags
	pushl %ecx		# restore original EFLAGS
	popfl
	testl $0x40000,%eax	# check if AC bit changed
	je is386

	movb $4,X86		# at least 486
	testl $0x200000,%eax	# check if ID bit changed
	je is486

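/*
 * The constants above: EFLAGS bit 18 (0x40000) is AC, which a 386
 * cannot toggle, and bit 21 (0x200000) is ID, which can only be
 * toggled when CPUID is available; 0x240000 flips both at once.
 */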
	/* get vendor info */
	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID		# save CPUID level
	movl %ebx,X86_VENDOR_ID		# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax			# do we have processor info as well?
	je is486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl		# save reg for future use
	andb $0x0f,%ah		# mask processor family
	movb %ah,X86
	andb $0xf0,%al		# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask stepping (mask revision)
	movb %cl,X86_MASK
	movl %edx,X86_CAPABILITY

is486:	movl $0x50022,%ecx	# set AM, WP, NE and MP
	jmp 2f

is386:	movl $2,%ecx		# set MP
2:	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0

	call check_x87
	lgdt early_gdt_descr
	lidt idt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.
	movl %eax,%fs			# gets reset once there's real percpu

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	xorl %eax,%eax			# Clear GS and LDT
	movl %eax,%gs
	lldt %ax

	cld			# gcc2 wants the direction flag cleared at all times
	pushl $0		# fake return address for unwinder
#ifdef CONFIG_SMP
	movb ready, %cl
	movb $1, ready
	cmpb $0,%cl		# the first CPU calls start_kernel
	je 1f
	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs		# set this cpu's percpu
	jmp initialize_secondary # all other CPUs call initialize_secondary
1:
#endif /* CONFIG_SMP */
	jmp start_kernel

/*
 * We depend on ET to be correct. This checks for 287/387.
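 *
 * After fninit a working FPU leaves a zero status word, so fstsw
 * stores 0 in %al; if %al stays non-zero there is no coprocessor and
 * CR0.EM (bit 2) is set so that FPU instructions trap rather than
 * being executed.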
 */
check_x87:
	movb $0,X86_HARD_MATH
	clts
	fninit
	fstsw %ax
	cmpb $0,%al
	je 1f
	movl %cr0,%eax		/* no coprocessor: have to set bits */
	xorl $4,%eax		/* set EM */
	movl %eax,%cr0
	ret
	ALIGN
1:	movb $1,X86_HARD_MATH
	.byte 0xDB,0xE4		/* fsetpm for 287, ignored by 387 */
	ret

/*
 * setup_idt
 *
 * Sets up an IDT with 256 entries, all pointing to ignore_int as
 * interrupt gates.  It doesn't actually load the IDT - that can be
 * done only after paging has been enabled and the kernel moved to
 * PAGE_OFFSET.  Interrupts are enabled elsewhere, when we can be
 * relatively sure everything is ok.
 *
 * Warning: %esi is live across this function.
 */
setup_idt:
	lea ignore_int,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax		/* selector = 0x0010 = cs */
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */

	lea idt_table,%edi
	mov $256,%ecx
rp_sidt:
	movl %eax,(%edi)
	movl %edx,4(%edi)
	addl $8,%edi
	dec %ecx
	jne rp_sidt

.macro	set_early_handler handler,trapno
	lea \handler,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
	lea idt_table,%edi
	movl %eax,8*\trapno(%edi)
	movl %edx,8*\trapno+4(%edi)
.endm

	set_early_handler handler=early_divide_err,trapno=0
	set_early_handler handler=early_illegal_opcode,trapno=6
	set_early_handler handler=early_protection_fault,trapno=13
	set_early_handler handler=early_page_fault,trapno=14

	ret

early_divide_err:
	xor %edx,%edx
	pushl $0	/* fake errcode */
	jmp early_fault

early_illegal_opcode:
	movl $6,%edx
	pushl $0	/* fake errcode */
	jmp early_fault

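/*
 * #GP (13) and #PF (14) push a hardware error code, so only the trap
 * number is loaded below; #DE (0) and #UD (6) above push a fake error
 * code to keep the frame seen by early_fault uniform.
 */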
early_protection_fault:
	movl $13,%edx
	jmp early_fault

early_page_fault:
	movl $14,%edx
	jmp early_fault

early_fault:
	cld
#ifdef CONFIG_PRINTK
	pusha
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	movl %cr2,%eax
	pushl %eax
	pushl %edx		/* trapno */
	pushl $fault_msg
#ifdef CONFIG_EARLY_PRINTK
	call early_printk
#else
	call printk
#endif
#endif
hlt_loop:
	hlt
	jmp hlt_loop

/* This is the default interrupt "handler" :-) */
	ALIGN
ignore_int:
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
#ifdef CONFIG_EARLY_PRINTK
	call early_printk
#else
	call printk
#endif
	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret

.section .text
/*
 * Real beginning of normal "text" segment
 */
ENTRY(stext)
ENTRY(_stext)

/*
 * BSS section
 */
.section ".bss.page_aligned","wa"
	.align PAGE_SIZE_asm
ENTRY(swapper_pg_dir)
	.fill 1024,4,0
ENTRY(swapper_pg_pmd)
	.fill 1024,4,0
ENTRY(empty_zero_page)
	.fill 4096,1,0

/*
 * This starts the data section.
 */
.data
ENTRY(stack_start)
	.long init_thread_union+THREAD_SIZE
	.long __BOOT_DS

ready:	.byte 0

early_recursion_flag:
	.long 0

int_msg:
	.asciz "Unknown interrupt or fault at EIP %p %p %p\n"

fault_msg:
	.ascii								\
/* fault info: */	"BUG: Int %d: CR2 %p\n"				\
/* pusha regs: */	"     EDI %p  ESI %p  EBP %p  ESP %p\n"		\
			"     EBX %p  EDX %p  ECX %p  EAX %p\n"		\
/* fault frame: */	"     err %p  EIP %p   CS %p  flg %p\n"		\
									\
			"Stack: %p %p %p %p %p %p %p %p\n"		\
			"       %p %p %p %p %p %p %p %p\n"		\
			"       %p %p %p %p %p %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions. They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size, and 32-bit linear address value:
 */

.globl boot_gdt_descr
.globl idt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32-bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32-bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long per_cpu__gdt_page		/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
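/*
 * Decoding the two quads above: base 0, limit 0xfffff with 4 KB
 * granularity and a 32-bit default operand size (flags 0xc), i.e. a
 * 4 GB flat segment; access byte 0x9a = present, DPL 0, code,
 * read/execute, and 0x92 = present, DPL 0, data, read/write.
 */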