/*
 * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 *
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us. These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at we first fixup the physical addresses in our page
	 * tables and then reload them.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	testl	$~PMD_PAGE_MASK, %ebp
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/*
	 * Set up the identity mapping for the switchover. These
	 * entries should *NOT* have the global bit set! This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
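	/*
	 * Layout note (taken from the .init.data tables at the end of
	 * this file): early_level4_pgt is immediately followed by the
	 * early_dynamic_pgts pages, so the 4096(%rbx) and 8192(%rbx)
	 * references below pick up the first two dynamic page-table
	 * pages; they serve as the PUD and PMD for this temporary
	 * identity mapping. Two consecutive PGD and PUD slots receive
	 * the same pointer, which appears intended to keep the mapping
	 * valid even if the kernel image straddles a PUD (1G) or PGD
	 * (512G) boundary.
	 */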
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp	1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Setup early boot stage 4 level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union. If the stack protector canary is enabled, it is
	 * located at %gs:40. Note that, on SMP, the boot cpu uses
	 * init data section till per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/* rsi is pointer to real mode structure with interesting info.
	   pass it to C */
	movq	%rsi, %rdi

	/* Finally jump to run C code and to be on real kernel address
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump. In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except stack. We just set up stack here. Then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq	stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	GLOBAL(stack_start)
	.quad	init_thread_union+THREAD_SIZE-8
	.word	0
	__FINITDATA

bad_address:
	jmp	bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	cmpl $2,(%rsp)		# X86_TRAP_NMI
	je .Lis_nmi		# Ignore NMI

	cmpl $2,early_recursion_flag(%rip)
	jz 1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 11f

	cmpl $14,72(%rsp)	# Page fault?
	jnz 10f
	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			# All good

10:
	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

11:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz 1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 40(%rsp),%rsi	# %rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found or page table generated
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	decl early_recursion_flag(%rip)
.Lis_nmi:
	addq $16,%rsp		# drop vector number and error code
	INTERRUPT_RETURN
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

	__INITDATA
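
/*
 * Boot-time page tables. early_level4_pgt is the temporary top-level
 * table that startup_64 loads into %cr3; statically only its last
 * slot (the kernel mapping) is populated. The early_dynamic_pgts
 * pages that follow it are scratch page-table pages: the first two
 * are taken by the identity mapping built in startup_64 above, and
 * the remainder are left for early page-table construction (the
 * early page-fault path calls early_make_pgtable, which is expected
 * to allocate from them).
 */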
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE