/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fix up the physical addresses in the page table.
	 */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

	/*
	 * Fix up the kernel text+data virtual addresses.  Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up, along with the mappings
	 * beyond _end.
	 */
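	/*
	 * Informational example (not from the original source): if the
	 * kernel was built to run at physical 0x200000 but was actually
	 * loaded at 0x1200000, %rbp holds the delta 0x1000000 and every
	 * present pmd below gains it, e.g. 0x200000|flags becomes
	 * 0x1200000|flags.
	 */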
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fix up phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_X86_TRAMPOLINE
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better take a jmp than relying on empty space being
	 * filled with 0x90 (nop).
	 */
	jmp	secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	init_rsp(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr(%rip)

	/* set up data segments. actually 0 would do too */
	movl	$__KERNEL_DS,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/*
	 * Set up a dummy PDA.  This is just for some early bootup code
	 * that does in_interrupt().
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
	movq	%rax,%rdx
	shrq	$32,%rdx
	wrmsr
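	/*
	 * Note (informational): wrmsr writes %edx:%eax to the MSR selected
	 * by %ecx, so the shrq above leaves the high 32 bits of
	 * &empty_zero_page in %edx while the low 32 bits remain in %eax.
	 */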
	/* esi is pointer to real mode structure with interesting info.
	   pass it to C */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	__FINITDATA

	ENTRY(init_rsp)
	.quad	init_thread_union+THREAD_SIZE-8

bad_address:
	jmp	bad_address

	.section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
	.globl early_idt_handlers
early_idt_handlers:
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl $i, %esi
	jmp early_idt_handler
	i = i + 1
	.endr
#endif

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq %rcx,%r9
	xorl %r8d,%r8d		# zero for error code
	movl %esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl $31,%ecx
	ja 0f
	movl $1,%eax
	salq %cl,%rax
	testl $0x27d00,%eax
	je 0f
	popq %r8		# get error code
0:	movq 0(%rsp),%rcx	# get ip
	movq 8(%rsp),%rdx	# get cs
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 8(%rsp),%rsi	# get rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long 0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
	.previous

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
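/*
 * Informational example (not part of the original source): with
 * PMD_SHIFT = 21 (2MB pages), PMDS(0, __PAGE_KERNEL_LARGE_EXEC, 3)
 * expands to three pmd entries:
 *	.quad 0x000000 + __PAGE_KERNEL_LARGE_EXEC
 *	.quad 0x200000 + __PAGE_KERNEL_LARGE_EXEC
 *	.quad 0x400000 + __PAGE_KERNEL_LARGE_EXEC
 */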
	/*
	 * This default setting generates an ident mapping at address
	 * 0x100000 and a mapping for the kernel that precisely maps
	 * virtual address 0xffffffff80000000 to physical address
	 * 0x000000 (always using 2Mbyte large pages provided by PAE mode).
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_spare_pgt)
	.fill	512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

/* We need valid kernel segments for data and code in long mode too.
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout.
 */

	.section .data.page_aligned, "aw"
	.align PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */

ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad	0,0,0			/* three TLS descriptors */
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
gdt_end:
	/* asm/segment.h:GDT_ENTRIES must match this */
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip 256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE
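/*
 * Informational: decoding __KERNEL_CS in cpu_gdt_table above,
 * 0x00af9b000000ffff has base 0, limit 0xfffff, access byte 0x9b
 * (present, DPL 0, execute/read code segment, accessed) and flags
 * nibble 0xa (G=1, L=1, D=0).  In long mode the CPU ignores the CS
 * base and limit; the L, D, DPL and P bits are what matter.
 */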