19a163ed8SThomas Gleixner/* 29a163ed8SThomas Gleixner * 39a163ed8SThomas Gleixner * Copyright (C) 1991, 1992 Linus Torvalds 49a163ed8SThomas Gleixner * 59a163ed8SThomas Gleixner * Enhanced CPU detection and feature setting code by Mike Jagdis 69a163ed8SThomas Gleixner * and Martin Mares, November 1997. 79a163ed8SThomas Gleixner */ 89a163ed8SThomas Gleixner 99a163ed8SThomas Gleixner.text 109a163ed8SThomas Gleixner#include <linux/threads.h> 118b2f7fffSSam Ravnborg#include <linux/init.h> 129a163ed8SThomas Gleixner#include <linux/linkage.h> 139a163ed8SThomas Gleixner#include <asm/segment.h> 140341c14dSJeremy Fitzhardinge#include <asm/page_types.h> 150341c14dSJeremy Fitzhardinge#include <asm/pgtable_types.h> 169a163ed8SThomas Gleixner#include <asm/cache.h> 179a163ed8SThomas Gleixner#include <asm/thread_info.h> 189a163ed8SThomas Gleixner#include <asm/asm-offsets.h> 199a163ed8SThomas Gleixner#include <asm/setup.h> 20551889a6SIan Campbell#include <asm/processor-flags.h> 218a50e513SH. Peter Anvin#include <asm/msr-index.h> 22cd4d09ecSBorislav Petkov#include <asm/cpufeatures.h> 2360a5317fSTejun Heo#include <asm/percpu.h> 244c5023a3SH. Peter Anvin#include <asm/nops.h> 25fb148d83SAlexander Kuleshov#include <asm/bootparam.h> 26784d5699SAl Viro#include <asm/export.h> 27551889a6SIan Campbell 28551889a6SIan Campbell/* Physical address */ 29551889a6SIan Campbell#define pa(X) ((X) - __PAGE_OFFSET) 309a163ed8SThomas Gleixner 319a163ed8SThomas Gleixner/* 329a163ed8SThomas Gleixner * References to members of the new_cpu_data structure. 
339a163ed8SThomas Gleixner */ 349a163ed8SThomas Gleixner 359a163ed8SThomas Gleixner#define X86 new_cpu_data+CPUINFO_x86 369a163ed8SThomas Gleixner#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor 379a163ed8SThomas Gleixner#define X86_MODEL new_cpu_data+CPUINFO_x86_model 389a163ed8SThomas Gleixner#define X86_MASK new_cpu_data+CPUINFO_x86_mask 399a163ed8SThomas Gleixner#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math 409a163ed8SThomas Gleixner#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level 419a163ed8SThomas Gleixner#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability 429a163ed8SThomas Gleixner#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id 439a163ed8SThomas Gleixner 449a163ed8SThomas Gleixner/* 45c090f532SJeremy Fitzhardinge * This is how much memory in addition to the memory covered up to 46c090f532SJeremy Fitzhardinge * and including _end we need mapped initially. 479a163ed8SThomas Gleixner * We need: 482bd2753fSYinghai Lu * (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE) 492bd2753fSYinghai Lu * (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE) 509a163ed8SThomas Gleixner * 519a163ed8SThomas Gleixner * Modulo rounding, each megabyte assigned here requires a kilobyte of 529a163ed8SThomas Gleixner * memory, which is currently unreclaimed. 539a163ed8SThomas Gleixner * 549a163ed8SThomas Gleixner * This should be a multiple of a page. 
552bd2753fSYinghai Lu * 562bd2753fSYinghai Lu * KERNEL_IMAGE_SIZE should be greater than pa(_end) 572bd2753fSYinghai Lu * and small than max_low_pfn, otherwise will waste some page table entries 589a163ed8SThomas Gleixner */ 599a163ed8SThomas Gleixner 609a163ed8SThomas Gleixner#if PTRS_PER_PMD > 1 61c090f532SJeremy Fitzhardinge#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) 629a163ed8SThomas Gleixner#else 63c090f532SJeremy Fitzhardinge#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) 649a163ed8SThomas Gleixner#endif 659a163ed8SThomas Gleixner 6604c17341SBorislav Petkov/* 6704c17341SBorislav Petkov * Number of possible pages in the lowmem region. 6804c17341SBorislav Petkov * 6904c17341SBorislav Petkov * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a 7004c17341SBorislav Petkov * gas warning about overflowing shift count when gas has been compiled 7104c17341SBorislav Petkov * with only a host target support using a 32-bit type for internal 7204c17341SBorislav Petkov * representation. 7304c17341SBorislav Petkov */ 7404c17341SBorislav PetkovLOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT) 75147dd561SH. Peter Anvin 76c090f532SJeremy Fitzhardinge/* Enough space to fit pagetables for the low memory linear map */ 77147dd561SH. Peter AnvinMAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT 78c090f532SJeremy Fitzhardinge 79c090f532SJeremy Fitzhardinge/* 80c090f532SJeremy Fitzhardinge * Worst-case size of the kernel mapping we need to make: 81147dd561SH. Peter Anvin * a relocatable kernel can live anywhere in lowmem, so we need to be able 82147dd561SH. Peter Anvin * to map all of lowmem. 83c090f532SJeremy Fitzhardinge */ 84147dd561SH. 
Peter AnvinKERNEL_PAGES = LOWMEM_PAGES 85c090f532SJeremy Fitzhardinge 867bf04be8SStratos PsomadakisINIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE 872bd2753fSYinghai LuRESERVE_BRK(pagetables, INIT_MAP_SIZE) 88796216a5SJeremy Fitzhardinge 899a163ed8SThomas Gleixner/* 909a163ed8SThomas Gleixner * 32-bit kernel entrypoint; only used by the boot CPU. On entry, 919a163ed8SThomas Gleixner * %esi points to the real-mode code as a 32-bit pointer. 929a163ed8SThomas Gleixner * CS and DS must be 4 GB flat segments, but we don't depend on 939a163ed8SThomas Gleixner * any particular GDT layout, because we load our own as soon as we 949a163ed8SThomas Gleixner * can. 959a163ed8SThomas Gleixner */ 964ae59b91STim Abbott__HEAD 979a163ed8SThomas GleixnerENTRY(startup_32) 98b32f96c7SJosh Poimboeuf movl pa(initial_stack),%ecx 9911d4c3f9SH. Peter Anvin 100a24e7851SRusty Russell /* test KEEP_SEGMENTS flag to see if the bootloader is asking 101a24e7851SRusty Russell us to not reload segments */ 102fb148d83SAlexander Kuleshov testb $KEEP_SEGMENTS, BP_loadflags(%esi) 103a24e7851SRusty Russell jnz 2f 1049a163ed8SThomas Gleixner 1059a163ed8SThomas Gleixner/* 1069a163ed8SThomas Gleixner * Set segments to known values. 1079a163ed8SThomas Gleixner */ 108551889a6SIan Campbell lgdt pa(boot_gdt_descr) 1099a163ed8SThomas Gleixner movl $(__BOOT_DS),%eax 1109a163ed8SThomas Gleixner movl %eax,%ds 1119a163ed8SThomas Gleixner movl %eax,%es 1129a163ed8SThomas Gleixner movl %eax,%fs 1139a163ed8SThomas Gleixner movl %eax,%gs 11411d4c3f9SH. Peter Anvin movl %eax,%ss 115a24e7851SRusty Russell2: 11611d4c3f9SH. Peter Anvin leal -__PAGE_OFFSET(%ecx),%esp 1179a163ed8SThomas Gleixner 1189a163ed8SThomas Gleixner/* 1199a163ed8SThomas Gleixner * Clear BSS first so that there are no surprises... 
1209a163ed8SThomas Gleixner */ 121a24e7851SRusty Russell cld 1229a163ed8SThomas Gleixner xorl %eax,%eax 123551889a6SIan Campbell movl $pa(__bss_start),%edi 124551889a6SIan Campbell movl $pa(__bss_stop),%ecx 1259a163ed8SThomas Gleixner subl %edi,%ecx 1269a163ed8SThomas Gleixner shrl $2,%ecx 1279a163ed8SThomas Gleixner rep ; stosl 1289a163ed8SThomas Gleixner/* 1299a163ed8SThomas Gleixner * Copy bootup parameters out of the way. 1309a163ed8SThomas Gleixner * Note: %esi still has the pointer to the real-mode data. 1319a163ed8SThomas Gleixner * With the kexec as boot loader, parameter segment might be loaded beyond 1329a163ed8SThomas Gleixner * kernel image and might not even be addressable by early boot page tables. 1339a163ed8SThomas Gleixner * (kexec on panic case). Hence copy out the parameters before initializing 1349a163ed8SThomas Gleixner * page tables. 1359a163ed8SThomas Gleixner */ 136551889a6SIan Campbell movl $pa(boot_params),%edi 1379a163ed8SThomas Gleixner movl $(PARAM_SIZE/4),%ecx 1389a163ed8SThomas Gleixner cld 1399a163ed8SThomas Gleixner rep 1409a163ed8SThomas Gleixner movsl 141551889a6SIan Campbell movl pa(boot_params) + NEW_CL_POINTER,%esi 1429a163ed8SThomas Gleixner andl %esi,%esi 143b595076aSUwe Kleine-König jz 1f # No command line 144551889a6SIan Campbell movl $pa(boot_command_line),%edi 1459a163ed8SThomas Gleixner movl $(COMMAND_LINE_SIZE/4),%ecx 1469a163ed8SThomas Gleixner rep 1479a163ed8SThomas Gleixner movsl 1489a163ed8SThomas Gleixner1: 1499a163ed8SThomas Gleixner 150dc3119e7SThomas Gleixner#ifdef CONFIG_OLPC 151fd699c76SAndres Salomon /* save OFW's pgdir table for later use when calling into OFW */ 152fd699c76SAndres Salomon movl %cr3, %eax 153fd699c76SAndres Salomon movl %eax, pa(olpc_ofw_pgd) 154fd699c76SAndres Salomon#endif 155fd699c76SAndres Salomon 156fe055896SBorislav Petkov#ifdef CONFIG_MICROCODE 15763b553c6SFenghua Yu /* Early load ucode on BSP. 
*/ 15863b553c6SFenghua Yu call load_ucode_bsp 15963b553c6SFenghua Yu#endif 16063b553c6SFenghua Yu 1619a163ed8SThomas Gleixner/* 1629a163ed8SThomas Gleixner * Initialize page tables. This creates a PDE and a set of page 1632bd2753fSYinghai Lu * tables, which are located immediately beyond __brk_base. The variable 164ccf3fe02SJeremy Fitzhardinge * _brk_end is set up to point to the first "safe" location. 1659a163ed8SThomas Gleixner * Mappings are created both at virtual address 0 (identity mapping) 1662bd2753fSYinghai Lu * and PAGE_OFFSET for up to _end. 1679a163ed8SThomas Gleixner */ 168551889a6SIan Campbell#ifdef CONFIG_X86_PAE 169551889a6SIan Campbell 170551889a6SIan Campbell /* 171b40827faSBorislav Petkov * In PAE mode initial_page_table is statically defined to contain 172b40827faSBorislav Petkov * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3 173b40827faSBorislav Petkov * entries). The identity mapping is handled by pointing two PGD entries 174b40827faSBorislav Petkov * to the first kernel PMD. 175551889a6SIan Campbell * 176b40827faSBorislav Petkov * Note the upper half of each PMD or PTE are always zero at this stage. 
177551889a6SIan Campbell */ 178551889a6SIan Campbell 17986b2b70eSJoe Korty#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */ 180551889a6SIan Campbell 181551889a6SIan Campbell xorl %ebx,%ebx /* %ebx is kept at zero */ 182551889a6SIan Campbell 183ccf3fe02SJeremy Fitzhardinge movl $pa(__brk_base), %edi 184b40827faSBorislav Petkov movl $pa(initial_pg_pmd), %edx 185b2bc2731SSuresh Siddha movl $PTE_IDENT_ATTR, %eax 1869a163ed8SThomas Gleixner10: 187b2bc2731SSuresh Siddha leal PDE_IDENT_ATTR(%edi),%ecx /* Create PMD entry */ 188551889a6SIan Campbell movl %ecx,(%edx) /* Store PMD entry */ 189551889a6SIan Campbell /* Upper half already zero */ 190551889a6SIan Campbell addl $8,%edx 191551889a6SIan Campbell movl $512,%ecx 192551889a6SIan Campbell11: 193551889a6SIan Campbell stosl 194551889a6SIan Campbell xchgl %eax,%ebx 195551889a6SIan Campbell stosl 196551889a6SIan Campbell xchgl %eax,%ebx 197551889a6SIan Campbell addl $0x1000,%eax 198551889a6SIan Campbell loop 11b 199551889a6SIan Campbell 200551889a6SIan Campbell /* 201c090f532SJeremy Fitzhardinge * End condition: we must map up to the end + MAPPING_BEYOND_END. 
202551889a6SIan Campbell */ 203c090f532SJeremy Fitzhardinge movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp 204551889a6SIan Campbell cmpl %ebp,%eax 205551889a6SIan Campbell jb 10b 206551889a6SIan Campbell1: 207ccf3fe02SJeremy Fitzhardinge addl $__PAGE_OFFSET, %edi 208ccf3fe02SJeremy Fitzhardinge movl %edi, pa(_brk_end) 2096af61a76SYinghai Lu shrl $12, %eax 2106af61a76SYinghai Lu movl %eax, pa(max_pfn_mapped) 211551889a6SIan Campbell 212551889a6SIan Campbell /* Do early initialization of the fixmap area */ 213b40827faSBorislav Petkov movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax 214b40827faSBorislav Petkov movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) 215551889a6SIan Campbell#else /* Not PAE */ 216551889a6SIan Campbell 217551889a6SIan Campbellpage_pde_offset = (__PAGE_OFFSET >> 20); 218551889a6SIan Campbell 219ccf3fe02SJeremy Fitzhardinge movl $pa(__brk_base), %edi 220b40827faSBorislav Petkov movl $pa(initial_page_table), %edx 221b2bc2731SSuresh Siddha movl $PTE_IDENT_ATTR, %eax 222551889a6SIan Campbell10: 223b2bc2731SSuresh Siddha leal PDE_IDENT_ATTR(%edi),%ecx /* Create PDE entry */ 2249a163ed8SThomas Gleixner movl %ecx,(%edx) /* Store identity PDE entry */ 2259a163ed8SThomas Gleixner movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */ 2269a163ed8SThomas Gleixner addl $4,%edx 2279a163ed8SThomas Gleixner movl $1024, %ecx 2289a163ed8SThomas Gleixner11: 2299a163ed8SThomas Gleixner stosl 2309a163ed8SThomas Gleixner addl $0x1000,%eax 2319a163ed8SThomas Gleixner loop 11b 232551889a6SIan Campbell /* 233c090f532SJeremy Fitzhardinge * End condition: we must map up to the end + MAPPING_BEYOND_END. 
234551889a6SIan Campbell */ 235c090f532SJeremy Fitzhardinge movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp 2369a163ed8SThomas Gleixner cmpl %ebp,%eax 2379a163ed8SThomas Gleixner jb 10b 238ccf3fe02SJeremy Fitzhardinge addl $__PAGE_OFFSET, %edi 239ccf3fe02SJeremy Fitzhardinge movl %edi, pa(_brk_end) 2406af61a76SYinghai Lu shrl $12, %eax 2416af61a76SYinghai Lu movl %eax, pa(max_pfn_mapped) 2429a163ed8SThomas Gleixner 243551889a6SIan Campbell /* Do early initialization of the fixmap area */ 244b40827faSBorislav Petkov movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax 245b40827faSBorislav Petkov movl %eax,pa(initial_page_table+0xffc) 246551889a6SIan Campbell#endif 247d50d8fe1SRusty Russell 248d50d8fe1SRusty Russell#ifdef CONFIG_PARAVIRT 249d50d8fe1SRusty Russell /* This is can only trip for a broken bootloader... */ 250d50d8fe1SRusty Russell cmpw $0x207, pa(boot_params + BP_version) 251d50d8fe1SRusty Russell jb default_entry 252d50d8fe1SRusty Russell 253d50d8fe1SRusty Russell /* Paravirt-compatible boot parameters. Look to see what architecture 254d50d8fe1SRusty Russell we're booting under. */ 255d50d8fe1SRusty Russell movl pa(boot_params + BP_hardware_subarch), %eax 256d50d8fe1SRusty Russell cmpl $num_subarch_entries, %eax 257d50d8fe1SRusty Russell jae bad_subarch 258d50d8fe1SRusty Russell 259d50d8fe1SRusty Russell movl pa(subarch_entries)(,%eax,4), %eax 260d50d8fe1SRusty Russell subl $__PAGE_OFFSET, %eax 261d50d8fe1SRusty Russell jmp *%eax 262d50d8fe1SRusty Russell 263d50d8fe1SRusty Russellbad_subarch: 264d50d8fe1SRusty RussellWEAK(lguest_entry) 265d50d8fe1SRusty RussellWEAK(xen_entry) 266d50d8fe1SRusty Russell /* Unknown implementation; there's really 267d50d8fe1SRusty Russell nothing we can do at this point. 
*/ 268d50d8fe1SRusty Russell ud2a 269d50d8fe1SRusty Russell 270d50d8fe1SRusty Russell __INITDATA 271d50d8fe1SRusty Russell 272d50d8fe1SRusty Russellsubarch_entries: 273d50d8fe1SRusty Russell .long default_entry /* normal x86/PC */ 274d50d8fe1SRusty Russell .long lguest_entry /* lguest hypervisor */ 275d50d8fe1SRusty Russell .long xen_entry /* Xen hypervisor */ 276d50d8fe1SRusty Russell .long default_entry /* Moorestown MID */ 277d50d8fe1SRusty Russellnum_subarch_entries = (. - subarch_entries) / 4 278d50d8fe1SRusty Russell.previous 279d50d8fe1SRusty Russell#else 280d50d8fe1SRusty Russell jmp default_entry 281d50d8fe1SRusty Russell#endif /* CONFIG_PARAVIRT */ 282d50d8fe1SRusty Russell 2833e2a0cc3SFenghua Yu#ifdef CONFIG_HOTPLUG_CPU 2843e2a0cc3SFenghua Yu/* 2853e2a0cc3SFenghua Yu * Boot CPU0 entry point. It's called from play_dead(). Everything has been set 2863e2a0cc3SFenghua Yu * up already except stack. We just set up stack here. Then call 2873e2a0cc3SFenghua Yu * start_secondary(). 2883e2a0cc3SFenghua Yu */ 2893e2a0cc3SFenghua YuENTRY(start_cpu0) 290b32f96c7SJosh Poimboeuf movl initial_stack, %ecx 2913e2a0cc3SFenghua Yu movl %ecx, %esp 2923e2a0cc3SFenghua Yu jmp *(initial_code) 2933e2a0cc3SFenghua YuENDPROC(start_cpu0) 2943e2a0cc3SFenghua Yu#endif 2953e2a0cc3SFenghua Yu 2969a163ed8SThomas Gleixner/* 2979a163ed8SThomas Gleixner * Non-boot CPU entry point; entered from trampoline.S 2989a163ed8SThomas Gleixner * We can't lgdt here, because lgdt itself uses a data segment, but 2999a163ed8SThomas Gleixner * we know the trampoline has already loaded the boot_gdt for us. 
3009a163ed8SThomas Gleixner * 3019a163ed8SThomas Gleixner * If cpu hotplug is not supported then this code can go in init section 3029a163ed8SThomas Gleixner * which will be freed later 3039a163ed8SThomas Gleixner */ 3049a163ed8SThomas GleixnerENTRY(startup_32_smp) 3059a163ed8SThomas Gleixner cld 3069a163ed8SThomas Gleixner movl $(__BOOT_DS),%eax 3079a163ed8SThomas Gleixner movl %eax,%ds 3089a163ed8SThomas Gleixner movl %eax,%es 3099a163ed8SThomas Gleixner movl %eax,%fs 3109a163ed8SThomas Gleixner movl %eax,%gs 311b32f96c7SJosh Poimboeuf movl pa(initial_stack),%ecx 31211d4c3f9SH. Peter Anvin movl %eax,%ss 31311d4c3f9SH. Peter Anvin leal -__PAGE_OFFSET(%ecx),%esp 31448927bbbSJarkko Sakkinen 315fe055896SBorislav Petkov#ifdef CONFIG_MICROCODE 31663b553c6SFenghua Yu /* Early load ucode on AP. */ 31763b553c6SFenghua Yu call load_ucode_ap 31863b553c6SFenghua Yu#endif 31963b553c6SFenghua Yu 320d50d8fe1SRusty Russelldefault_entry: 321021ef050SH. Peter Anvin#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ 322021ef050SH. Peter Anvin X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ 323021ef050SH. Peter Anvin X86_CR0_PG) 324021ef050SH. Peter Anvin movl $(CR0_STATE & ~X86_CR0_PG),%eax 325021ef050SH. Peter Anvin movl %eax,%cr0 326021ef050SH. Peter Anvin 3279a163ed8SThomas Gleixner/* 3289efb58deSBorislav Petkov * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave 3299efb58deSBorislav Petkov * bits like NT set. This would confuse the debugger if this code is traced. So 3309efb58deSBorislav Petkov * initialize them properly now before switching to protected mode. That means 3319efb58deSBorislav Petkov * DF in particular (even though we have cleared it earlier after copying the 3329efb58deSBorislav Petkov * command line) because GCC expects it. 3339a163ed8SThomas Gleixner */ 3345a5a51dbSH. Peter Anvin pushl $0 3355a5a51dbSH. 
Peter Anvin popfl 3369efb58deSBorislav Petkov 3379efb58deSBorislav Petkov/* 3389efb58deSBorislav Petkov * New page tables may be in 4Mbyte page mode and may be using the global pages. 3399efb58deSBorislav Petkov * 3409efb58deSBorislav Petkov * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists 3419efb58deSBorislav Petkov * if and only if CPUID exists and has flags other than the FPU flag set. 3429efb58deSBorislav Petkov */ 3439efb58deSBorislav Petkov movl $-1,pa(X86_CPUID) # preset CPUID level 3449efb58deSBorislav Petkov movl $X86_EFLAGS_ID,%ecx 3459efb58deSBorislav Petkov pushl %ecx 3469efb58deSBorislav Petkov popfl # set EFLAGS=ID 3475a5a51dbSH. Peter Anvin pushfl 3489efb58deSBorislav Petkov popl %eax # get EFLAGS 3499efb58deSBorislav Petkov testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remained set? 3505e2a044dSBorislav Petkov jz enable_paging # hw disallowed setting of ID bit 3519efb58deSBorislav Petkov # which means no CPUID and no CR4 3529efb58deSBorislav Petkov 3539efb58deSBorislav Petkov xorl %eax,%eax 3549efb58deSBorislav Petkov cpuid 3559efb58deSBorislav Petkov movl %eax,pa(X86_CPUID) # save largest std CPUID function 3565a5a51dbSH. Peter Anvin 3576662c34fSH. Peter Anvin movl $1,%eax 3586662c34fSH. Peter Anvin cpuid 3596662c34fSH. Peter Anvin andl $~1,%edx # Ignore CPUID.FPU 3605e2a044dSBorislav Petkov jz enable_paging # No flags or only CPUID.FPU = no CR4 3616662c34fSH. Peter Anvin 3625a5a51dbSH. Peter Anvin movl pa(mmu_cr4_features),%eax 3639a163ed8SThomas Gleixner movl %eax,%cr4 3649a163ed8SThomas Gleixner 3658a50e513SH. Peter Anvin testb $X86_CR4_PAE, %al # check if PAE is enabled 3665e2a044dSBorislav Petkov jz enable_paging 3679a163ed8SThomas Gleixner 3689a163ed8SThomas Gleixner /* Check if extended functions are implemented */ 3699a163ed8SThomas Gleixner movl $0x80000000, %eax 3709a163ed8SThomas Gleixner cpuid 3718a50e513SH. Peter Anvin /* Value must be in the range 0x80000001 to 0x8000ffff */ 3728a50e513SH. 
Peter Anvin subl $0x80000001, %eax 3738a50e513SH. Peter Anvin cmpl $(0x8000ffff-0x80000001), %eax 3745e2a044dSBorislav Petkov ja enable_paging 375ebba638aSKees Cook 376ebba638aSKees Cook /* Clear bogus XD_DISABLE bits */ 377ebba638aSKees Cook call verify_cpu 378ebba638aSKees Cook 3799a163ed8SThomas Gleixner mov $0x80000001, %eax 3809a163ed8SThomas Gleixner cpuid 3819a163ed8SThomas Gleixner /* Execute Disable bit supported? */ 3828a50e513SH. Peter Anvin btl $(X86_FEATURE_NX & 31), %edx 3835e2a044dSBorislav Petkov jnc enable_paging 3849a163ed8SThomas Gleixner 3859a163ed8SThomas Gleixner /* Setup EFER (Extended Feature Enable Register) */ 3868a50e513SH. Peter Anvin movl $MSR_EFER, %ecx 3879a163ed8SThomas Gleixner rdmsr 3889a163ed8SThomas Gleixner 3898a50e513SH. Peter Anvin btsl $_EFER_NX, %eax 3909a163ed8SThomas Gleixner /* Make changes effective */ 3919a163ed8SThomas Gleixner wrmsr 3929a163ed8SThomas Gleixner 3935e2a044dSBorislav Petkovenable_paging: 3949a163ed8SThomas Gleixner 3959a163ed8SThomas Gleixner/* 3969a163ed8SThomas Gleixner * Enable paging 3979a163ed8SThomas Gleixner */ 398b40827faSBorislav Petkov movl $pa(initial_page_table), %eax 3999a163ed8SThomas Gleixner movl %eax,%cr3 /* set the page table pointer.. */ 400021ef050SH. Peter Anvin movl $CR0_STATE,%eax 4019a163ed8SThomas Gleixner movl %eax,%cr0 /* ..and set paging (PG) bit */ 4029a163ed8SThomas Gleixner ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 4039a163ed8SThomas Gleixner1: 40411d4c3f9SH. Peter Anvin /* Shift the stack pointer to a virtual address */ 40511d4c3f9SH. Peter Anvin addl $__PAGE_OFFSET, %esp 4069a163ed8SThomas Gleixner 4079a163ed8SThomas Gleixner/* 4089a163ed8SThomas Gleixner * start system 32-bit setup. We need to re-do some of the things done 4099a163ed8SThomas Gleixner * in 16-bit mode for the "real" operations. 4109a163ed8SThomas Gleixner */ 4114c5023a3SH. Peter Anvin movl setup_once_ref,%eax 4124c5023a3SH. Peter Anvin andl %eax,%eax 4134c5023a3SH. 
Peter Anvin jz 1f # Did we do this already? 4144c5023a3SH. Peter Anvin call *%eax 4154c5023a3SH. Peter Anvin1: 4169a163ed8SThomas Gleixner 4179a163ed8SThomas Gleixner/* 418166df91dSBorislav Petkov * Check if it is 486 4199a163ed8SThomas Gleixner */ 420237d1548SWang YanQing movb $4,X86 # at least 486 421c3a22a26SBorislav Petkov cmpl $-1,X86_CPUID 4229a163ed8SThomas Gleixner je is486 4239a163ed8SThomas Gleixner 4249a163ed8SThomas Gleixner /* get vendor info */ 4259a163ed8SThomas Gleixner xorl %eax,%eax # call CPUID with 0 -> return vendor ID 4269a163ed8SThomas Gleixner cpuid 4279a163ed8SThomas Gleixner movl %eax,X86_CPUID # save CPUID level 4289a163ed8SThomas Gleixner movl %ebx,X86_VENDOR_ID # lo 4 chars 4299a163ed8SThomas Gleixner movl %edx,X86_VENDOR_ID+4 # next 4 chars 4309a163ed8SThomas Gleixner movl %ecx,X86_VENDOR_ID+8 # last 4 chars 4319a163ed8SThomas Gleixner 4329a163ed8SThomas Gleixner orl %eax,%eax # do we have processor info as well? 4339a163ed8SThomas Gleixner je is486 4349a163ed8SThomas Gleixner 4359a163ed8SThomas Gleixner movl $1,%eax # Use the CPUID instruction to get CPU type 4369a163ed8SThomas Gleixner cpuid 4379a163ed8SThomas Gleixner movb %al,%cl # save reg for future use 4389a163ed8SThomas Gleixner andb $0x0f,%ah # mask processor family 4399a163ed8SThomas Gleixner movb %ah,X86 4409a163ed8SThomas Gleixner andb $0xf0,%al # mask model 4419a163ed8SThomas Gleixner shrb $4,%al 4429a163ed8SThomas Gleixner movb %al,X86_MODEL 4439a163ed8SThomas Gleixner andb $0x0f,%cl # mask mask revision 4449a163ed8SThomas Gleixner movb %cl,X86_MASK 4459a163ed8SThomas Gleixner movl %edx,X86_CAPABILITY 4469a163ed8SThomas Gleixner 447c3a22a26SBorislav Petkovis486: 448c3a22a26SBorislav Petkov movl $0x50022,%ecx # set AM, WP, NE and MP 449166df91dSBorislav Petkov movl %cr0,%eax 4509a163ed8SThomas Gleixner andl $0x80000011,%eax # Save PG,PE,ET 4519a163ed8SThomas Gleixner orl %ecx,%eax 4529a163ed8SThomas Gleixner movl %eax,%cr0 4539a163ed8SThomas Gleixner 4549a163ed8SThomas 
Gleixner lgdt early_gdt_descr 4559a163ed8SThomas Gleixner lidt idt_descr 4569a163ed8SThomas Gleixner ljmp $(__KERNEL_CS),$1f 4579a163ed8SThomas Gleixner1: movl $(__KERNEL_DS),%eax # reload all the segment registers 4589a163ed8SThomas Gleixner movl %eax,%ss # after changing gdt. 4599a163ed8SThomas Gleixner 4609a163ed8SThomas Gleixner movl $(__USER_DS),%eax # DS/ES contains default USER segment 4619a163ed8SThomas Gleixner movl %eax,%ds 4629a163ed8SThomas Gleixner movl %eax,%es 4639a163ed8SThomas Gleixner 4640dd76d73SBrian Gerst movl $(__KERNEL_PERCPU), %eax 4650dd76d73SBrian Gerst movl %eax,%fs # set this cpu's percpu 4660dd76d73SBrian Gerst 46760a5317fSTejun Heo movl $(__KERNEL_STACK_CANARY),%eax 4689a163ed8SThomas Gleixner movl %eax,%gs 46960a5317fSTejun Heo 47060a5317fSTejun Heo xorl %eax,%eax # Clear LDT 4719a163ed8SThomas Gleixner lldt %ax 4729a163ed8SThomas Gleixner 4739a163ed8SThomas Gleixner pushl $0 # fake return address for unwinder 474e3f77edfSGlauber Costa jmp *(initial_code) 4759a163ed8SThomas Gleixner 4764c5023a3SH. Peter Anvin#include "verify_cpu.S" 4774c5023a3SH. Peter Anvin 4789a163ed8SThomas Gleixner/* 4794c5023a3SH. Peter Anvin * setup_once 4809a163ed8SThomas Gleixner * 4814c5023a3SH. Peter Anvin * The setup work we only want to run on the BSP. 4829a163ed8SThomas Gleixner * 4839a163ed8SThomas Gleixner * Warning: %esi is live across this function. 4849a163ed8SThomas Gleixner */ 4854c5023a3SH. Peter Anvin__INIT 4864c5023a3SH. Peter Anvinsetup_once: 4874c5023a3SH. Peter Anvin /* 488425be567SAndy Lutomirski * Set up a idt with 256 interrupt gates that push zero if there 489425be567SAndy Lutomirski * is no error code and then jump to early_idt_handler_common. 490425be567SAndy Lutomirski * It doesn't actually load the idt - that needs to be done on 491425be567SAndy Lutomirski * each CPU. Interrupts are enabled elsewhere, when we can be 492425be567SAndy Lutomirski * relatively sure everything is ok. 4934c5023a3SH. Peter Anvin */ 4944c5023a3SH. 
Peter Anvin 4954c5023a3SH. Peter Anvin movl $idt_table,%edi 496425be567SAndy Lutomirski movl $early_idt_handler_array,%eax 4974c5023a3SH. Peter Anvin movl $NUM_EXCEPTION_VECTORS,%ecx 4984c5023a3SH. Peter Anvin1: 4994c5023a3SH. Peter Anvin movl %eax,(%edi) 5004c5023a3SH. Peter Anvin movl %eax,4(%edi) 5014c5023a3SH. Peter Anvin /* interrupt gate, dpl=0, present */ 5024c5023a3SH. Peter Anvin movl $(0x8E000000 + __KERNEL_CS),2(%edi) 503425be567SAndy Lutomirski addl $EARLY_IDT_HANDLER_SIZE,%eax 5044c5023a3SH. Peter Anvin addl $8,%edi 5054c5023a3SH. Peter Anvin loop 1b 5064c5023a3SH. Peter Anvin 5074c5023a3SH. Peter Anvin movl $256 - NUM_EXCEPTION_VECTORS,%ecx 5084c5023a3SH. Peter Anvin movl $ignore_int,%edx 5099a163ed8SThomas Gleixner movl $(__KERNEL_CS << 16),%eax 5109a163ed8SThomas Gleixner movw %dx,%ax /* selector = 0x0010 = cs */ 5119a163ed8SThomas Gleixner movw $0x8E00,%dx /* interrupt gate - dpl=0, present */ 5124c5023a3SH. Peter Anvin2: 5139a163ed8SThomas Gleixner movl %eax,(%edi) 5149a163ed8SThomas Gleixner movl %edx,4(%edi) 5159a163ed8SThomas Gleixner addl $8,%edi 5164c5023a3SH. Peter Anvin loop 2b 5179a163ed8SThomas Gleixner 5184c5023a3SH. Peter Anvin#ifdef CONFIG_CC_STACKPROTECTOR 5194c5023a3SH. Peter Anvin /* 5204c5023a3SH. Peter Anvin * Configure the stack canary. The linker can't handle this by 5214c5023a3SH. Peter Anvin * relocation. Manually set base address in stack canary 5224c5023a3SH. Peter Anvin * segment descriptor. 5234c5023a3SH. Peter Anvin */ 5244c5023a3SH. Peter Anvin movl $gdt_page,%eax 5254c5023a3SH. Peter Anvin movl $stack_canary,%ecx 5264c5023a3SH. Peter Anvin movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) 5274c5023a3SH. Peter Anvin shrl $16, %ecx 5284c5023a3SH. Peter Anvin movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) 5294c5023a3SH. Peter Anvin movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax) 5304c5023a3SH. Peter Anvin#endif 5319a163ed8SThomas Gleixner 5324c5023a3SH. 
Peter Anvin andl $0,setup_once_ref /* Once is enough, thanks */ 5339a163ed8SThomas Gleixner ret 5349a163ed8SThomas Gleixner 535425be567SAndy LutomirskiENTRY(early_idt_handler_array) 5364c5023a3SH. Peter Anvin # 36(%esp) %eflags 5374c5023a3SH. Peter Anvin # 32(%esp) %cs 5384c5023a3SH. Peter Anvin # 28(%esp) %eip 5394c5023a3SH. Peter Anvin # 24(%rsp) error code 5404c5023a3SH. Peter Anvin i = 0 5414c5023a3SH. Peter Anvin .rept NUM_EXCEPTION_VECTORS 542425be567SAndy Lutomirski .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 5434c5023a3SH. Peter Anvin pushl $0 # Dummy error code, to make stack frame uniform 5444c5023a3SH. Peter Anvin .endif 5454c5023a3SH. Peter Anvin pushl $i # 20(%esp) Vector number 546425be567SAndy Lutomirski jmp early_idt_handler_common 5474c5023a3SH. Peter Anvin i = i + 1 548425be567SAndy Lutomirski .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc 5494c5023a3SH. Peter Anvin .endr 550425be567SAndy LutomirskiENDPROC(early_idt_handler_array) 5519a163ed8SThomas Gleixner 552425be567SAndy Lutomirskiearly_idt_handler_common: 553425be567SAndy Lutomirski /* 554425be567SAndy Lutomirski * The stack is the hardware frame, an error code or zero, and the 555425be567SAndy Lutomirski * vector number. 556425be567SAndy Lutomirski */ 5579a163ed8SThomas Gleixner cld 5585fa10196SH. Peter Anvin 5594c5023a3SH. Peter Anvin incl %ss:early_recursion_flag 5604c5023a3SH. Peter Anvin 5617bbcdb1cSAndy Lutomirski /* The vector number is in pt_regs->gs */ 5624c5023a3SH. 
Peter Anvin 5637bbcdb1cSAndy Lutomirski cld 5647bbcdb1cSAndy Lutomirski pushl %fs /* pt_regs->fs */ 5657bbcdb1cSAndy Lutomirski movw $0, 2(%esp) /* clear high bits (some CPUs leave garbage) */ 5667bbcdb1cSAndy Lutomirski pushl %es /* pt_regs->es */ 5677bbcdb1cSAndy Lutomirski movw $0, 2(%esp) /* clear high bits (some CPUs leave garbage) */ 5687bbcdb1cSAndy Lutomirski pushl %ds /* pt_regs->ds */ 5697bbcdb1cSAndy Lutomirski movw $0, 2(%esp) /* clear high bits (some CPUs leave garbage) */ 5707bbcdb1cSAndy Lutomirski pushl %eax /* pt_regs->ax */ 5717bbcdb1cSAndy Lutomirski pushl %ebp /* pt_regs->bp */ 5727bbcdb1cSAndy Lutomirski pushl %edi /* pt_regs->di */ 5737bbcdb1cSAndy Lutomirski pushl %esi /* pt_regs->si */ 5747bbcdb1cSAndy Lutomirski pushl %edx /* pt_regs->dx */ 5757bbcdb1cSAndy Lutomirski pushl %ecx /* pt_regs->cx */ 5767bbcdb1cSAndy Lutomirski pushl %ebx /* pt_regs->bx */ 5777bbcdb1cSAndy Lutomirski 5787bbcdb1cSAndy Lutomirski /* Fix up DS and ES */ 5797bbcdb1cSAndy Lutomirski movl $(__KERNEL_DS), %ecx 5807bbcdb1cSAndy Lutomirski movl %ecx, %ds 5817bbcdb1cSAndy Lutomirski movl %ecx, %es 5827bbcdb1cSAndy Lutomirski 5837bbcdb1cSAndy Lutomirski /* Load the vector number into EDX */ 5847bbcdb1cSAndy Lutomirski movl PT_GS(%esp), %edx 5857bbcdb1cSAndy Lutomirski 5867bbcdb1cSAndy Lutomirski /* Load GS into pt_regs->gs and clear high bits */ 5877bbcdb1cSAndy Lutomirski movw %gs, PT_GS(%esp) 5887bbcdb1cSAndy Lutomirski movw $0, PT_GS+2(%esp) 5897bbcdb1cSAndy Lutomirski 5907bbcdb1cSAndy Lutomirski movl %esp, %eax /* args are pt_regs (EAX), trapnr (EDX) */ 5914c5023a3SH. 
	call	early_fixup_exception

	/* Restore the pt_regs frame that the early IDT entry code built,
	 * in reverse push order. */
	popl	%ebx	/* pt_regs->bx */
	popl	%ecx	/* pt_regs->cx */
	popl	%edx	/* pt_regs->dx */
	popl	%esi	/* pt_regs->si */
	popl	%edi	/* pt_regs->di */
	popl	%ebp	/* pt_regs->bp */
	popl	%eax	/* pt_regs->ax */
	popl	%ds	/* pt_regs->ds */
	popl	%es	/* pt_regs->es */
	popl	%fs	/* pt_regs->fs */
	popl	%gs	/* pt_regs->gs */
	/* %ss: override: NOTE(review) presumably because %ds may not be
	 * trustworthy this early — confirm against the entry path. */
	decl	%ss:early_recursion_flag
	addl	$4, %esp	/* pop pt_regs->orig_ax */
	iret
ENDPROC(early_idt_handler_common)

/* This is the default interrupt "handler" :-)
 *
 * Prints a diagnostic (and a stack dump) for unexpected early
 * interrupts/faults, guarded by early_recursion_flag so a fault inside
 * printk cannot recurse forever.  Without CONFIG_PRINTK it is a bare iret. */
	ALIGN
ignore_int:
	cld			/* C calling convention expects DF clear */
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax	/* printk needs kernel %ds/%es */
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag	/* already nested twice? just halt */
	je hlt_loop
	incl early_recursion_flag
	/* Push four words from the interrupted frame (above the five
	 * registers saved here) plus the format string: five printk
	 * arguments, dropped below with addl $(5*4).
	 * NOTE(review): offsets 16/24/32/40(%esp) look like they pick
	 * EIP/CS/etc. out of the exception frame — confirm; int_msg
	 * formats only three %p values. */
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call printk

	call dump_stack

	addl $(5*4),%esp	/* discard the five printk arguments */
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret

/* Dead end for unrecoverable early faults: hlt wakes on interrupt,
 * so loop back into it forever. */
hlt_loop:
	hlt
	jmp hlt_loop
ENDPROC(ignore_int)

__INITDATA
	.align 4
/* Nesting depth of the early fault/interrupt handlers (see cmpl $2 above). */
GLOBAL(early_recursion_flag)
	.long 0

__REFDATA
	.align 4
/* First C function jumped to once low-level setup is done. */
ENTRY(initial_code)
	.long i386_start_kernel
ENTRY(setup_once_ref)
	.long setup_once

/*
 * BSS section: zero-initialized, page-aligned early paging structures.
 */
__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
#ifdef CONFIG_X86_PAE
/* PAE: KPMDS page-middle-directory pages (1024 4-byte entries each). */
initial_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
/* Non-PAE: a single one-page page directory. */
.globl initial_page_table
initial_page_table:
	.fill 1024,4,0
#endif
/* Page table backing the early fixmap. */
initial_pg_fixmap:
	.fill 1024,4,0
/* One page of zeroes, exported for generic kernel use. */
.globl empty_zero_page
empty_zero_page:
	.fill 4096,1,0
/* The kernel's final page directory (1024 4-byte entries). */
.globl swapper_pg_dir
swapper_pg_dir:
	.fill 1024,4,0
EXPORT_SYMBOL(empty_zero_page)

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE
/* PAE top-level page directory: four 8-byte (two .long) PDPT entries.
 * Entry 0 doubles as the low identity mapping needed while paging is
 * switched on; the last KPMDS slots point at the kernel's PMD pages. */
ENTRY(initial_page_table)
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
# error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */
#endif

.data
.balign 4
/* Initial kernel stack pointer: top of the init task's thread union. */
ENTRY(initial_stack)
	.long init_thread_union+THREAD_SIZE

__INITRODATA
/* printk format used by ignore_int; the three %p consume words pushed
 * from the interrupted frame. */
int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions. They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size, and 32-bit linear address value:
 */

	.data
.globl boot_gdt_descr
.globl idt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7		# limit (size - 1) of the boot GDT
	.long boot_gdt - __PAGE_OFFSET	# physical address: paging not on yet

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1		# limit (size - 1) of the real GDT
	.long gdt_page			/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0	/* unused slots before the boot CS */
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */