/*
 * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fixup the physical addresses in our
	 * page tables and then reload them.
	 */

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
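	/*
	 * Worked example (editor's sketch, not part of the original
	 * source): assume the kernel was loaded at physical 16MiB, so
	 * _text(%rip) below yields 0x1000000.  With 4-level paging
	 * (PGDIR_SHIFT = 39, PUD_SHIFT = 30, PMD_SHIFT = 21) the
	 * indices computed below are
	 *	pgd index = 0x1000000 >> 39             = 0
	 *	pud index = (0x1000000 >> 30) & 511     = 0
	 *	pmd index = (0x1000000 >> 21) & 511     = 8
	 * Two consecutive pgd/pud slots (at +0 and +8) are filled so
	 * that an image straddling a pgd/pud boundary still finds a
	 * valid entry in the following slot.
	 */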
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, (4096+0)(%rbx,%rax,8)
	movq	%rdx, (4096+8)(%rbx,%rax,8)

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b

	/*
	 * Fixup the kernel text+data virtual addresses.  Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp	1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Set up early boot stage 4-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on.  We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
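	/*
	 * Editor's note (illustrative, not from the original source):
	 * in 64bit mode lgdt takes a 10-byte pseudo-descriptor in
	 * memory, a 16bit limit followed by a 64bit linear base.
	 * That matches the layout of early_gdt_descr near the end of
	 * this file:
	 *	.word	GDT_ENTRIES*8-1			# limit
	 *	.quad	INIT_PER_CPU_VAR(gdt_page)	# base
	 */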
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * the init data section until the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/*
	 * %rsi is a pointer to the real mode structure with interesting
	 * info; pass it to C.
	 */
	movq	%rsi, %rdi

	/*
	 * Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make
	 * this a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
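	/*
	 * Illustration (editor's sketch, not in the original source):
	 * immediately before the lretq above the stack looks like
	 *	  (%rsp)  target %rip = initial_code
	 *	 8(%rsp)  target %cs  = __KERNEL_CS
	 *	16(%rsp)  0, a fake return address for the unwinder
	 * lretq pops %rip and %cs, so execution continues at
	 * initial_code with the kernel %cs loaded; at boot that is
	 * x86_64_start_kernel (SMP bootup repoints initial_code, see
	 * the __REFDATA block below).
	 */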
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except stack. We just set up stack here. Then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq	stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	GLOBAL(stack_start)
	.quad	init_thread_union+THREAD_SIZE-8
	.word	0
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
	.globl early_idt_handlers
early_idt_handlers:
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
	ASM_NOP2
	.else
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler
	i = i + 1
	.endr

/* This is global to keep gas from relaxing the jumps */
ENTRY(early_idt_handler)
	cld

	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 11f

	cmpl $14,72(%rsp)	# Page fault?
	jnz 10f
	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			# All good

10:
	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

11:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 40(%rsp),%rsi	# %rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found or page table generated
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	addq $16,%rsp		# drop vector number and error code
	decl early_recursion_flag(%rip)
	INTERRUPT_RETURN
ENDPROC(early_idt_handler)

	__INITDATA

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
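/*
 * Expansion sketch (editor's illustration, not part of the original
 * source): with PMD_SHIFT = 21, an invocation such as
 *	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 3)
 * assembles to three 2MB pmd entries:
 *	.quad 0x000000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0x200000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0x400000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 * i.e. consecutive 1:1 mappings starting at START, as used by
 * level2_ident_pgt and level2_kernel_pgt below.
 */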
	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

#include "../../x86/xen/xen-head.S"

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip IDT_ENTRIES * 16

	.align L1_CACHE_BYTES
ENTRY(nmi_idt_table)
	.skip IDT_ENTRIES * 16

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE