/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Enhanced CPU detection and feature setting code by Mike Jagdis
 *  and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>

/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)

/*
 * References to members of the new_cpu_data structure.
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end)
 * and smaller than max_low_pfn, otherwise we will waste page table entries.
 */

#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif

/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = \
	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT

/*
 * Worst-case size of the kernel mapping we need to make:
 * the worst-case size of the kernel itself, plus the extra we need
 * to map for the linear map.
 */
KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
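/*
 * Worked example (assuming the common VMSPLIT_3G layout, i.e.
 * __PAGE_OFFSET = 0xC0000000): the low memory linear map spans
 * 1 GiB = 0x40000 pages.  Non-PAE: 0x40000 / 1024 = 256 page table
 * pages, so MAPPING_BEYOND_END is 1 MiB.  PAE: 0x40000 / 512 + 4 =
 * 516 pages, roughly 2 MiB.  The brk reservation above applies the
 * same PAGE_TABLE_SIZE() formula to KERNEL_PAGES to size the early
 * pagetables themselves.
 */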
/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
.section .text.head,"ax",@progbits
ENTRY(startup_32)
	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
	   us to not reload segments */
	testb $(1<<6), BP_loadflags(%esi)
	jnz 2f

/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
2:

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With kexec as the boot loader, the parameter segment might be loaded
 * beyond the kernel image and might not even be addressable by the early
 * boot page tables (the kexec-on-panic case).  Hence copy out the
 * parameters before initializing page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f			# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_PARAVIRT
	/* This can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)
	jb default_entry

	/* Paravirt-compatible boot parameters.  Look to see what architecture
	   we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax
	jae bad_subarch

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax
	jmp *%eax

bad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
	/* Unknown implementation; there's really
	   nothing we can do at this point. */
	ud2a

	__INITDATA

subarch_entries:
	.long default_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
	.long default_entry		/* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#endif /* CONFIG_PARAVIRT */

/*
 * Initialize page tables.  This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base.  The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 *
 * Note that the stack is not yet set up!
 */
default_entry:
#ifdef CONFIG_X86_PAE

	/*
	 * In PAE mode swapper_pg_dir is statically defined to contain enough
	 * entries to cover the VMSPLIT option (that is the top 1, 2 or 3
	 * entries).  The identity mapping is handled by pointing two PGD
	 * entries to the first kernel PMD.
	 *
	 * Note the upper half of each PMD or PTE is always zero at
	 * this stage.
	 */

#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */

	xorl %ebx,%ebx				/* %ebx is kept at zero */

	movl $pa(__brk_base), %edi
	movl $pa(swapper_pg_pmd), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
	movl %ecx,(%edx)			/* Store PMD entry */
						/* Upper half already zero */
	addl $8,%edx
	movl $512,%ecx
11:
	stosl
	xchgl %eax,%ebx
	stosl
	xchgl %eax,%ebx
	addl $0x1000,%eax
	loop 11b

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
1:
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
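	/*
	 * Reader's note on the 11: loop above: each PAE PTE is 64 bits,
	 * so every iteration stores two dwords.  The first stosl writes
	 * pfn|PTE_IDENT_ATTR from %eax; xchgl swaps in %ebx (kept at
	 * zero) so the second stosl clears the upper half, and the
	 * second xchgl restores %eax for the addl $0x1000 that advances
	 * to the next 4 KiB page.
	 */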
#else	/* Not PAE */

page_pde_offset = (__PAGE_OFFSET >> 20);

	movl $pa(__brk_base), %edi
	movl $pa(swapper_pg_dir), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
	movl %ecx,(%edx)			/* Store identity PDE entry */
	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
	addl $4,%edx
	movl $1024, %ecx
11:
	stosl
	addl $0x1000,%eax
	loop 11b
	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(swapper_pg_dir+0xffc)
#endif
	jmp 3f
/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If cpu hotplug is not supported then this code can go in an init
 * section which will be freed later.
 */

__CPUINIT

#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
#endif /* CONFIG_SMP */
3:

/*
 * New page tables may be in 4Mbyte page mode and may
 * be using global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all!
 * So we do not try to touch it unless we really have
 * some bits in it to set.  This won't work if the BSP
 * implements cr4 but this AP does not -- very unlikely
 * but be warned!  The same applies to the pse feature
 * if not equally supported. --macro
 *
 * NOTE! We have to correct for the fact that we're
 * not yet offset PAGE_OFFSET..
 */
#define cr4_bits pa(mmu_cr4_features)
	movl cr4_bits,%edx
	andl %edx,%edx
	jz 6f
	movl %cr4,%eax		# Turn on paging options (PSE,PAE,..)
	orl %edx,%eax
	movl %eax,%cr4

	btl $5, %eax		# check if PAE is enabled
	jnc 6f

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	cmpl $0x80000000, %eax
	jbe 6f
	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $20, %edx
	jnc 6f

	/* Setup EFER (Extended Feature Enable Register) */
	movl $0xc0000080, %ecx
	rdmsr

	btsl $11, %eax
	/* Make changes effective */
	wrmsr
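	/*
	 * For reference: 0xc0000080 is MSR_EFER and bit 11 is EFER.NX
	 * (NXE).  With NXE set, bit 63 of a PAE page table entry acts
	 * as the no-execute bit, which is why this is only attempted
	 * after PAE is enabled and CPUID 0x80000001 EDX bit 20 reports
	 * NX support.
	 */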
6:

/*
 * Enable paging
 */
	movl $pa(swapper_pg_dir),%eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl %cr0,%eax
	orl $X86_CR0_PG,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Set up the stack pointer */
	lss stack_start,%esp

/*
 * Initialize eflags.  Some BIOSes leave bits like NT set.  This would
 * confuse the debugger if this code is traced.
 * XXX - best to initialize before switching to protected mode.
 */
	pushl $0
	popfl

#ifdef CONFIG_SMP
	cmpb $0, ready
	jz 1f			/* Initial CPU cleans BSS */
	jmp checkCPUtype
1:
#endif /* CONFIG_SMP */

/*
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	call setup_idt

checkCPUtype:

	movl $-1,X86_CPUID		# -1 for no CPUID initially

/* check if it is 486 or 386. */
/*
 * XXX - this does a lot of unnecessary setup.  Alignment checks don't
 * apply at our cpl of 0 and the stack ought to be aligned already, and
 * we don't need to preserve eflags.
 */

	movb $3,X86		# at least 386
	pushfl			# push EFLAGS
	popl %eax		# get EFLAGS
	movl %eax,%ecx		# save original EFLAGS
	xorl $0x240000,%eax	# flip AC and ID bits in EFLAGS
	pushl %eax		# copy to EFLAGS
	popfl			# set EFLAGS
	pushfl			# get new EFLAGS
	popl %eax		# put it in eax
	xorl %ecx,%eax		# change in flags
	pushl %ecx		# restore original EFLAGS
	popfl
	testl $0x40000,%eax	# check if AC bit changed
	je is386

	movb $4,X86		# at least 486
	testl $0x200000,%eax	# check if ID bit changed
	je is486

	/* get vendor info */
	xorl %eax,%eax		# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID	# save CPUID level
	movl %ebx,X86_VENDOR_ID	# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax		# do we have processor info as well?
	je is486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl		# save reg for future use
	andb $0x0f,%ah		# mask processor family
	movb %ah,X86
	andb $0xf0,%al		# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask stepping (mask revision)
	movb %cl,X86_MASK
	movl %edx,X86_CAPABILITY
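/*
 * Worked decode of the CPUID(1) result above, using a hypothetical
 * EAX value: EAX = 0x0543 would give family 5 (bits 11:8), model 4
 * (bits 7:4) and stepping 3 (bits 3:0, stored in X86_MASK).  Only the
 * base family/model fields are read here; the extended family/model
 * fields are left to the later C-level CPU identification.
 */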
is486:	movl $0x50022,%ecx	# set AM, WP, NE and MP
	jmp 2f

is386:	movl $2,%ecx		# set MP
2:	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0

	call check_x87
	lgdt early_gdt_descr
	lidt idt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * The linker can't handle this by relocation.  Manually set
	 * base address in stack canary segment descriptor.
	 */
	cmpb $0,ready
	jne 1f
	movl $per_cpu__gdt_page,%eax
	movl $per_cpu__stack_canary,%ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
1:
#endif
	movl $(__KERNEL_STACK_CANARY),%eax
	movl %eax,%gs

	xorl %eax,%eax		# Clear LDT
	lldt %ax

	cld			# gcc2 wants the direction flag cleared at all times
	pushl $0		# fake return address for unwinder
#ifdef CONFIG_SMP
	movb ready, %cl
	movb $1, ready
	cmpb $0,%cl		# the first CPU calls start_kernel
	je 1f
	movl (stack_start), %esp
1:
#endif /* CONFIG_SMP */
	jmp *(initial_code)

/*
 * We depend on ET to be correct. This checks for 287/387.
 */
check_x87:
	movb $0,X86_HARD_MATH
	clts
	fninit
	fstsw %ax
	cmpb $0,%al
	je 1f
	movl %cr0,%eax		/* no coprocessor: have to set bits */
	xorl $4,%eax		/* set EM */
	movl %eax,%cr0
	ret
	ALIGN
1:	movb $1,X86_HARD_MATH
	.byte 0xDB,0xE4		/* fsetpm for 287, ignored by 387 */
	ret

/*
 * setup_idt
 *
 * sets up an idt with 256 entries pointing to
 * ignore_int, interrupt gates.  It doesn't actually load
 * the idt - that can be done only after paging has been enabled
 * and the kernel moved to PAGE_OFFSET.  Interrupts
 * are enabled elsewhere, when we can be relatively
 * sure everything is ok.
 *
 * Warning: %esi is live across this function.
 */
setup_idt:
	lea ignore_int,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax		/* selector = 0x0010 = cs */
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */

	lea idt_table,%edi
	mov $256,%ecx
rp_sidt:
	movl %eax,(%edi)
	movl %edx,4(%edi)
	addl $8,%edi
	dec %ecx
	jne rp_sidt

.macro	set_early_handler handler,trapno
	lea \handler,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
	lea idt_table,%edi
	movl %eax,8*\trapno(%edi)
	movl %edx,8*\trapno+4(%edi)
.endm

	set_early_handler handler=early_divide_err,trapno=0
	set_early_handler handler=early_illegal_opcode,trapno=6
	set_early_handler handler=early_protection_fault,trapno=13
	set_early_handler handler=early_page_fault,trapno=14

	ret
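/*
 * Gate layout used above, for reference: each 32-bit interrupt gate
 * is 8 bytes.  The low dword is selector(31:16) | handler(15:0); the
 * high dword is handler(31:16) | 0x8E00, where 0x8E means present,
 * DPL 0, 32-bit interrupt gate.
 */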
early_divide_err:
	xor %edx,%edx
	pushl $0		/* fake errcode */
	jmp early_fault

early_illegal_opcode:
	movl $6,%edx
	pushl $0		/* fake errcode */
	jmp early_fault

early_protection_fault:
	movl $13,%edx
	jmp early_fault

early_page_fault:
	movl $14,%edx
	jmp early_fault

early_fault:
	cld
#ifdef CONFIG_PRINTK
	pusha
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	movl %cr2,%eax
	pushl %eax
	pushl %edx		/* trapno */
	pushl $fault_msg
	call printk
#endif
	call dump_stack
hlt_loop:
	hlt
	jmp hlt_loop

/* This is the default interrupt "handler" :-) */
	ALIGN
ignore_int:
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call printk

	call dump_stack

	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret

	__REFDATA
.align 4
ENTRY(initial_code)
	.long i386_start_kernel

/*
 * BSS section
 */
.section ".bss.page_aligned","wa"
	.align PAGE_SIZE_asm
#ifdef CONFIG_X86_PAE
swapper_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
ENTRY(swapper_pg_dir)
	.fill 1024,4,0
#endif
swapper_pg_fixmap:
	.fill 1024,4,0
ENTRY(empty_zero_page)
	.fill 4096,1,0

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
.section ".data.page_aligned","wa"
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE_asm
ENTRY(swapper_pg_dir)
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(swapper_pg_pmd+PGD_IDENT_ATTR),0
# else
#  error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE_asm		/* needs to be page-sized too */
#endif

.data
ENTRY(stack_start)
	.long init_thread_union+THREAD_SIZE
	.long __BOOT_DS

ready:	.byte 0

early_recursion_flag:
	.long 0

int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

fault_msg:
/* fault info: */
	.ascii "BUG: Int %d: CR2 %p\n"
/* pusha regs: */
	.ascii " EDI %p ESI %p EBP %p ESP %p\n"
	.ascii " EBX %p EDX %p ECX %p EAX %p\n"
/* fault frame: */
	.ascii " err %p EIP %p CS %p flg %p\n"
	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
	.ascii "       %p %p %p %p %p %p %p %p\n"
	.asciz "       %p %p %p %p %p %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions.  They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size, and a 32-bit linear address value:
 */

.globl boot_gdt_descr
.globl idt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long per_cpu__gdt_page		/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
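/*
 * Descriptor decode, for reference: 0x00cf9a000000ffff is base 0,
 * limit 0xfffff with 4 KiB granularity (i.e. 4 GiB), access byte 0x9a
 * (present, DPL 0, code, execute/read); 0x00cf92000000ffff is the
 * matching data segment with access byte 0x92 (present, DPL 0, data,
 * read/write).
 */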