/*
 * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and
	 * the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	testl	$~PMD_PAGE_MASK, %ebp
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
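
	/*
	 * Worked example for the fixups above (hypothetical numbers, not
	 * from this file): with the default CONFIG_PHYSICAL_START of
	 * 0x1000000, _text is linked to run at
	 * __START_KERNEL_map + 0x1000000.  If the bootloader actually
	 * placed us at physical 0x5000000, then
	 *
	 *	%rbp = 0x5000000 - 0x1000000 = 0x4000000
	 *
	 * and adding %rbp to the entries above rewrites the physical
	 * pointers between the page table levels to match where the
	 * tables really ended up in memory.
	 */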

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(PAGE_SIZE + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$PAGE_SIZE, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, PAGE_SIZE(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, PAGE_SIZE(%rbx,%rax,8)

	addq	$PAGE_SIZE * 2, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b
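
	/*
	 * Worked example for the loop above (hypothetical numbers): with
	 * _text at physical 0x1000000 and _end at 0x2800000, and
	 * PMD_SHIFT being 21, the entry count computed in %rcx is
	 *
	 *	((0x2800000 - 1) >> 21) - (0x1000000 >> 21) + 1
	 *	= 19 - 8 + 1 = 12
	 *
	 * i.e. twelve 2MB pmd entries, enough to cover all of
	 * [_text, _end) even when _end is not 2MB aligned.
	 */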

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp	1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Setup early boot stage 4 level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * the init data section until the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/*
	 * %rsi is a pointer to the real-mode structure with interesting
	 * info; pass it to C.
	 */
	movq	%rsi, %rdi
	jmp	start_cpu
ENDPROC(secondary_startup_64)

ENTRY(start_cpu)
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set, so
	 * we make this a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2 states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	call	1f		# put return address on stack for unwinder
1:	xorq	%rbp, %rbp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu)
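
/*
 * For reference, a sketch of the stack just before the lretq above
 * (initial_code is x86_64_start_kernel on the boot CPU, per the
 * __REFDATA block below):
 *
 *	8(%rsp)	$__KERNEL_CS	popped into %cs
 *	0(%rsp)	initial_code	popped into %rip
 *
 * lretq pops the new %rip first and the new %cs second, which is what
 * gives us a 64bit absolute transfer that also reloads %cs.
 */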

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here. Then call
 * start_secondary() via start_cpu().
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip), %rsp
	jmp	start_cpu
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	/*
	 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
	 * unwinder reliably detect the end of the stack.
	 */
	.quad	init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
	__FINITDATA

bad_address:
	jmp	bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	# 96(%rsp) %cs
	# 88(%rsp) %rip
	# 80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl	early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq	%rsi				/* pt_regs->si */
	movq	8(%rsp), %rsi			/* RSI = vector number */
	movq	%rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	%rax				/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->bx */
	pushq	%rbp				/* pt_regs->bp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */

	cmpq	$14,%rsi		/* Page fault? */
	jnz	10f
	GET_CR2_INTO(%rdi)		/* Can clobber any volatile register if pv */
	call	early_make_pgtable
	andl	%eax,%eax
	jz	20f			/* All good */

10:
	movq	%rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call	early_fixup_exception

20:
	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_iret
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1-to-1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
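
/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 3) expands to
 * three consecutive 2MB mappings (PMD_SHIFT is 21, so each step adds
 * 0x200000):
 *
 *	.quad 0x000000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0x200000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0x400000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 */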

	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)