/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Enhanced CPU detection and feature setting code by Mike Jagdis
 *  and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/desc.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>

/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)

/*
 * References to members of the new_cpu_data structure.
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end)
 * and smaller than max_low_pfn, otherwise we will waste some page
 * table entries.
 */

#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif

/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = \
	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT

/*
 * Worst-case size of the kernel mapping we need to make:
 * the worst-case size of the kernel itself, plus the extra we need
 * to map for the linear map.
 */
KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
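/*
 * Illustrative sizing example (assuming the default CONFIG_VMSPLIT_3G,
 * i.e. __PAGE_OFFSET == 0xC0000000, so the lowmem linear map spans 1 GiB):
 *
 *	pages to map    = (4 GiB - 3 GiB) / 4 KiB         = 0x40000
 *	non-PAE tables  = 0x40000 / 1024                  = 256 pages (1 MiB)
 *	PAE tables      = 0x40000 / 512 + 4 PGD pages     = 516 pages (~2 MiB)
 *
 * which is where the "a kilobyte per megabyte" rule of thumb in the
 * comment above comes from.
 */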
/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
.section .text.head,"ax",@progbits
ENTRY(startup_32)
	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
	   us to not reload segments */
	testb $(1<<6), BP_loadflags(%esi)
	jnz 2f

/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
2:

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With kexec as the boot loader, the parameter segment might be loaded
 * beyond the kernel image and might not even be addressable by early
 * boot page tables (kexec on panic case).  Hence copy out the parameters
 * before initializing the page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f			# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_PARAVIRT
	/* This can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)
	jb default_entry

	/* Paravirt-compatible boot parameters.  Look to see what architecture
	   we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax
	jae bad_subarch

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax
	jmp *%eax

bad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
	/* Unknown implementation; there's really
	   nothing we can do at this point. */
	ud2a

	__INITDATA

subarch_entries:
	.long default_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#endif /* CONFIG_PARAVIRT */

/*
 * Initialize page tables.  This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base.  The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.  (An illustrative C sketch of the
 * mapping built here follows this block.)
 *
 * Note that the stack is not yet set up!
 */
default_entry:
#ifdef CONFIG_X86_PAE

	/*
	 * In PAE mode swapper_pg_dir is statically defined to contain enough
	 * entries to cover the VMSPLIT option (that is the top 1, 2 or 3
	 * entries). The identity mapping is handled by pointing two PGD
	 * entries to the first kernel PMD.
	 *
	 * Note the upper half of each PMD or PTE is always zero at
	 * this stage.
	 */

#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */

	xorl %ebx,%ebx				/* %ebx is kept at zero */

	movl $pa(__brk_base), %edi
	movl $pa(swapper_pg_pmd), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
	movl %ecx,(%edx)			/* Store PMD entry */
						/* Upper half already zero */
	addl $8,%edx
	movl $512,%ecx
11:
	stosl
	xchgl %eax,%ebx
	stosl
	xchgl %eax,%ebx
	addl $0x1000,%eax
	loop 11b

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
1:
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
#else	/* Not PAE */

page_pde_offset = (__PAGE_OFFSET >> 20);

	movl $pa(__brk_base), %edi
	movl $pa(swapper_pg_dir), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
	movl %ecx,(%edx)			/* Store identity PDE entry */
	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
	addl $4,%edx
	movl $1024, %ecx
11:
	stosl
	addl $0x1000,%eax
	loop 11b
	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(swapper_pg_dir+0xffc)
#endif
	jmp 3f
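
/*
 * Illustrative only (not assembled): a rough C equivalent of the non-PAE
 * loop above, using the same symbols.  The PAE branch is analogous, with
 * 512 8-byte entries per table and the upper 32 bits left zero.
 *
 *	u32 *pte = (u32 *)pa(__brk_base);	// tables allocated from the brk
 *	u32 *pgd = (u32 *)pa(swapper_pg_dir);
 *	u32 phys = 0;
 *	do {
 *		pgd[0] = (u32)pte | PDE_IDENT_ATTR;	// identity slot
 *		pgd[__PAGE_OFFSET >> 22] = pgd[0];	// kernel (PAGE_OFFSET) slot
 *		pgd++;
 *		for (int i = 0; i < 1024; i++, phys += 0x1000)
 *			*pte++ = phys | PTE_IDENT_ATTR;
 *	} while (phys < pa(_end) + MAPPING_BEYOND_END);	// "end condition" above
 */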
/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If cpu hotplug is not supported then this code can go in init section
 * which will be freed later
 */

#ifndef CONFIG_HOTPLUG_CPU
.section .init.text,"ax",@progbits
#endif

#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
#endif /* CONFIG_SMP */
3:

/*
 * New page tables may be in 4Mbyte page mode and may
 * be using the global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all!
 * So we do not try to touch it unless we really have
 * some bits in it to set.  This won't work if the BSP
 * implements cr4 but this AP does not -- very unlikely
 * but be warned!  The same applies to the pse feature
 * if not equally supported. --macro
 *
 * NOTE! We have to correct for the fact that we're
 * not yet offset PAGE_OFFSET..
 */
#define cr4_bits pa(mmu_cr4_features)
	movl cr4_bits,%edx
	andl %edx,%edx
	jz 6f
	movl %cr4,%eax		# Turn on paging options (PSE,PAE,..)
	orl %edx,%eax
	movl %eax,%cr4

	btl $5, %eax		# check if PAE is enabled
	jnc 6f

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	cmpl $0x80000000, %eax
	jbe 6f
	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $20, %edx
	jnc 6f

	/* Setup EFER (Extended Feature Enable Register) */
	movl $0xc0000080, %ecx
	rdmsr

	btsl $11, %eax
	/* Make changes effective */
	wrmsr

6:

/*
 * Enable paging
 */
	movl $pa(swapper_pg_dir),%eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl %cr0,%eax
	orl $X86_CR0_PG,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Set up the stack pointer */
	lss stack_start,%esp

/*
 * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
 * confuse the debugger if this code is traced.
 * XXX - best to initialize before switching to protected mode.
 */
	pushl $0
	popfl

#ifdef CONFIG_SMP
	cmpb $0, ready
	jz 1f			/* Initial CPU cleans BSS */
	jmp checkCPUtype
1:
#endif /* CONFIG_SMP */

/*
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	call setup_idt

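/*
 * For reference (standard EFLAGS bit positions used by the probe below):
 * bit 18 (AC, 0x40000) cannot be toggled on a 386, and bit 21 (ID,
 * 0x200000) can only be toggled if CPUID is implemented; xoring with
 * 0x240000 flips both at once.
 */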
checkCPUtype:

	movl $-1,X86_CPUID		# -1 for no CPUID initially

/* check if it is 486 or 386. */
/*
 * XXX - this does a lot of unnecessary setup.  Alignment checks don't
 * apply at our cpl of 0 and the stack ought to be aligned already, and
 * we don't need to preserve eflags.
 */

	movb $3,X86		# at least 386
	pushfl			# push EFLAGS
	popl %eax		# get EFLAGS
	movl %eax,%ecx		# save original EFLAGS
	xorl $0x240000,%eax	# flip AC and ID bits in EFLAGS
	pushl %eax		# copy to EFLAGS
	popfl			# set EFLAGS
	pushfl			# get new EFLAGS
	popl %eax		# put it in eax
	xorl %ecx,%eax		# change in flags
	pushl %ecx		# restore original EFLAGS
	popfl
	testl $0x40000,%eax	# check if AC bit changed
	je is386

	movb $4,X86		# at least 486
	testl $0x200000,%eax	# check if ID bit changed
	je is486

	/* get vendor info */
	xorl %eax,%eax		# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID	# save CPUID level
	movl %ebx,X86_VENDOR_ID	# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax		# do we have processor info as well?
	je is486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl		# save reg for future use
	andb $0x0f,%ah		# mask processor family
	movb %ah,X86
	andb $0xf0,%al		# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask mask revision
	movb %cl,X86_MASK
	movl %edx,X86_CAPABILITY

is486:	movl $0x50022,%ecx	# set AM, WP, NE and MP
	jmp 2f

is386:	movl $2,%ecx		# set MP
2:	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0

	call check_x87
	lgdt early_gdt_descr
	lidt idt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * The linker can't handle this by relocation.  Manually set
	 * base address in stack canary segment descriptor.
	 */
	cmpb $0,ready
	jne 1f
	movl $per_cpu__gdt_page,%eax
	movl $per_cpu__stack_canary,%ecx
	subl $20, %ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
1:
#endif
	movl $(__KERNEL_STACK_CANARY),%eax
	movl %eax,%gs

	xorl %eax,%eax		# Clear LDT
	lldt %ax

	cld			# gcc2 wants the direction flag cleared at all times
	pushl $0		# fake return address for unwinder
#ifdef CONFIG_SMP
	movb ready, %cl
	movb $1, ready
	cmpb $0,%cl		# the first CPU calls start_kernel
	je   1f
	movl (stack_start), %esp
1:
#endif /* CONFIG_SMP */
	jmp *(initial_code)

/*
 * We depend on ET to be correct. This checks for 287/387.
 */
check_x87:
	movb $0,X86_HARD_MATH
	clts
	fninit
	fstsw %ax
	cmpb $0,%al
	je 1f
	movl %cr0,%eax		/* no coprocessor: have to set bits */
	xorl $4,%eax		/* set EM */
	movl %eax,%cr0
	ret
	ALIGN
1:	movb $1,X86_HARD_MATH
	.byte 0xDB,0xE4		/* fsetpm for 287, ignored by 387 */
	ret

/*
 *  setup_idt
 *
 *  sets up an idt with 256 entries pointing to
 *  ignore_int, interrupt gates. It doesn't actually load
 *  idt - that can be done only after paging has been enabled
 *  and the kernel moved to PAGE_OFFSET. Interrupts
 *  are enabled elsewhere, when we can be relatively
 *  sure everything is ok.
 *
 *  Warning: %esi is live across this function.
 */
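/*
 * For reference, the 8-byte gate assembled in %eax/%edx below follows the
 * standard 32-bit interrupt gate layout:
 *
 *	low dword  = (selector __KERNEL_CS << 16) | handler offset bits 15..0
 *	high dword = (handler offset bits 31..16 << 16) | 0x8E00
 *	             (0x8E00: present, DPL=0, type 0xE = 32-bit interrupt gate)
 */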
setup_idt:
	lea ignore_int,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax		/* selector = 0x0010 = cs */
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */

	lea idt_table,%edi
	mov $256,%ecx
rp_sidt:
	movl %eax,(%edi)
	movl %edx,4(%edi)
	addl $8,%edi
	dec %ecx
	jne rp_sidt

.macro	set_early_handler handler,trapno
	lea \handler,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
	lea idt_table,%edi
	movl %eax,8*\trapno(%edi)
	movl %edx,8*\trapno+4(%edi)
.endm

	set_early_handler handler=early_divide_err,trapno=0
	set_early_handler handler=early_illegal_opcode,trapno=6
	set_early_handler handler=early_protection_fault,trapno=13
	set_early_handler handler=early_page_fault,trapno=14

	ret

early_divide_err:
	xor %edx,%edx
	pushl $0		/* fake errcode */
	jmp early_fault

early_illegal_opcode:
	movl $6,%edx
	pushl $0		/* fake errcode */
	jmp early_fault

early_protection_fault:
	movl $13,%edx
	jmp early_fault

early_page_fault:
	movl $14,%edx
	jmp early_fault

early_fault:
	cld
#ifdef CONFIG_PRINTK
	pusha
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	movl %cr2,%eax
	pushl %eax
	pushl %edx		/* trapno */
	pushl $fault_msg
	call printk
#endif
	call dump_stack
hlt_loop:
	hlt
	jmp hlt_loop

/* This is the default interrupt "handler" :-) */
	ALIGN
ignore_int:
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call printk

	call dump_stack

	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret

.section .cpuinit.data,"wa"
.align 4
ENTRY(initial_code)
	.long i386_start_kernel

.section .text
/*
 * Real beginning of normal "text" segment
 */
ENTRY(stext)
ENTRY(_stext)

/*
 * BSS section
 */
.section ".bss.page_aligned","wa"
	.align PAGE_SIZE_asm
#ifdef CONFIG_X86_PAE
swapper_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
ENTRY(swapper_pg_dir)
	.fill 1024,4,0
#endif
swapper_pg_fixmap:
	.fill 1024,4,0
ENTRY(empty_zero_page)
	.fill 4096,1,0

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
.section ".data.page_aligned","wa"
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE_asm
ENTRY(swapper_pg_dir)
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
# else
# error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE_asm		/* needs to be page-sized too */
#endif

.data
ENTRY(stack_start)
	.long init_thread_union+THREAD_SIZE
	.long __BOOT_DS

ready:	.byte 0

early_recursion_flag:
	.long 0

int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

fault_msg:
/* fault info: */
	.ascii "BUG: Int %d: CR2 %p\n"
/* pusha regs: */
	.ascii " EDI %p ESI %p EBP %p ESP %p\n"
	.ascii " EBX %p EDX %p ECX %p EAX %p\n"
/* fault frame: */
	.ascii " err %p EIP %p CS %p flg %p\n"
	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
	.ascii "       %p %p %p %p %p %p %p %p\n"
	.asciz "       %p %p %p %p %p %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions. They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size, and 32-bit linear address value:
 */

.globl boot_gdt_descr
.globl idt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long per_cpu__gdt_page		/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
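/*
 * For reference, the two descriptors below decode to flat 4 GB segments:
 * base 0, limit 0xfffff with 4 KiB granularity, 32-bit, DPL 0;
 * access byte 0x9a = code (execute/read), 0x92 = data (read/write).
 */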
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */