/*
 * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded an identity mapped page table
	 * for us. These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fixup the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fixup the physical addresses in the page table
	 */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
	addq	%rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:
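
	/*
	 * Worked example of the spare-pgt path above (the load address
	 * 0x48000000, i.e. 1G + 128M, is purely illustrative): %rdi is
	 * already 2M aligned, the pud index is 0x48000000 >> 30 = 1 and
	 * the pmd index is (0x48000000 >> 21) & 511 = 64.  So
	 * level3_ident_pgt[1] is pointed at level2_spare_pgt, and
	 * level2_spare_pgt[64] gets a 2M large-page entry covering the
	 * load address.
	 */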

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */

	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_X86_TRAMPOLINE
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better take a jmp than relying on empty space being
	 * filled with 0x90 (nop).
	 */
	jmp	secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
	movq	%rax, %cr4

	/* Setup early boot 4-level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	stack_start(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

#ifdef CONFIG_SMP
	/*
	 * Fix up static pointers that need __per_cpu_load added. The assembler
	 * is unable to do this directly. This is only needed for the boot cpu.
	 * These values are set up with the correct base addresses by C code
	 * for secondary cpus.
	 */
	movq	initial_gs(%rip), %rax
	cmpl	$0, per_cpu__cpu_number(%rax)
	jne	1f
	addq	%rax, early_gdt_descr_base(%rip)
1:
#endif
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments. actually 0 would do too */
	movl	$__KERNEL_DS,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union. If the stack protector canary is enabled, it is
	 * located at %gs:40. Note that, on SMP, the boot cpu uses
	 * init data section till per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	initial_gs(%rip),%rax
	movq	%rax,%rdx
	shrq	$32,%rdx
	wrmsr
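
	/*
	 * Note on the wrmsr above: wrmsr takes the MSR number in %ecx
	 * and the 64bit value split across %edx:%eax (high:low halves),
	 * hence the shrq. In 64bit mode %gs-relative addressing uses
	 * the base set via MSR_GS_BASE, not one taken from the %gs
	 * selector loaded above.
	 */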

	/* %esi is a pointer to the real mode structure with interesting
	 * info.  Pass it to C.
	 */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump. In addition we need to ensure %cs is set so we make this
	 * a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	ENTRY(initial_gs)
#ifdef CONFIG_SMP
	.quad	__per_cpu_load
#else
	.quad	PER_CPU_VAR(irq_stack_union)
#endif
	__FINITDATA

	ENTRY(stack_start)
	.quad	init_thread_union+THREAD_SIZE-8
	.word	0

bad_address:
	jmp	bad_address

	.section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
	.globl	early_idt_handlers
early_idt_handlers:
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl	$i, %esi
	jmp	early_idt_handler
	i = i + 1
	.endr
#endif

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	incl	early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq	%rcx,%r9
	xorl	%r8d,%r8d		# zero for error code
	movl	%esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl	$31,%ecx
	ja	0f
	movl	$1,%eax
	salq	%cl,%rax
	testl	$0x27d00,%eax		# vectors 8, 10-14 and 17
	je	0f
	popq	%r8			# get error code
0:	movq	0(%rsp),%rcx		# get ip
	movq	8(%rsp),%rdx		# get cs
	xorl	%eax,%eax
	leaq	early_idt_msg(%rip),%rdi
	call	early_printk
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	call	dump_stack
#ifdef CONFIG_KALLSYMS
	leaq	early_idt_ripmsg(%rip),%rdi
	movq	0(%rsp),%rsi		# get rip again
	call	__print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp	1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long	0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
	.previous

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
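
	/* For example, PMDS(0, perm, 3) -- with "perm" standing in for
	 * the permission bits -- expands to:
	 *	.quad 0x000000 + perm
	 *	.quad 0x200000 + perm
	 *	.quad 0x400000 + perm
	 * i.e. one pmd entry per 2MB step, since PMD_SHIFT is 21.
	 */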

	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000 (always using
	 * 2Mbyte large pages provided by PAE mode).
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_spare_pgt)
	.fill	512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align	16
	.globl	early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	per_cpu__gdt_page

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

#include "../../x86/xen/xen-head.S"

	.section .bss, "aw", @nobits
	.align	L1_CACHE_BYTES
ENTRY(idt_table)
	.skip	256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align	PAGE_SIZE
ENTRY(empty_zero_page)
	.skip	PAGE_SIZE