/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fix up the physical addresses in the page table */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

	/*
	 * Fix up the kernel text+data virtual addresses.  Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up, along with the mappings
	 * beyond _end.
	 */
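	/*
	 * The loop below walks all 512 pmd entries of level2_kernel_pgt
	 * (512 * 8 bytes = one 4096-byte page, hence the %r8 bound) and
	 * relocates each present entry by the load delta kept in %rbp.
	 * Roughly this C sketch (names illustrative, not real symbols):
	 *
	 *	for (i = 0; i < 512; i++)
	 *		if (pmd[i] & 1)			// _PAGE_PRESENT
	 *			pmd[i] += load_delta;	// delta in %rbp
	 */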
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fix up phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_SMP
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros.  Better take a jmp than relying on empty space being
	 * filled with 0x90 (nop).
	 */
	jmp	secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	xorq	%rax, %rax
	btsq	$5, %rax		/* set CR4.PAE (bit 5) */
	btsq	$7, %rax		/* set CR4.PGE (bit 7) */
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx, %edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20, %edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	init_rsp(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running.  We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr(%rip)

	/* Set up data segments.  Actually 0 would do too. */
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %ss
	movl	%eax, %es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax, %fs
	movl	%eax, %gs

	/*
	 * Set up a dummy PDA.  This is just for some early bootup code
	 * that does in_interrupt().
	 */
	movl	$MSR_GS_BASE, %ecx
	movq	$empty_zero_page, %rax
	movq	%rax, %rdx
	shrq	$32, %rdx
	wrmsr

	/* %esi is a pointer to the real mode structure with interesting
	 * info; pass it to C. */
	movl	%esi, %edi

	/* Finally jump to run C code and to be at a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set, so
	 * we make this a far return.
	 */
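	/*
	 * After the three pushes below the stack looks like this (a
	 * sketch, top of stack first); lretq then pops %rip and %cs:
	 *
	 *	 (%rsp)		initial_code	-> popped into %rip
	 *	 8(%rsp)	__KERNEL_CS	-> popped into %cs
	 *	16(%rsp)	0		-> left as fake return address
	 */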
	movq	initial_code(%rip), %rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	__FINITDATA

	ENTRY(init_rsp)
	.quad	init_thread_union+THREAD_SIZE-8

bad_address:
	jmp	bad_address

	.section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
	.globl	early_idt_handlers
early_idt_handlers:
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl	$i, %esi
	jmp	early_idt_handler
	i = i + 1
	.endr
#endif

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	incl	early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq	%rcx,%r9
	xorl	%r8d,%r8d	# zero for error code
	movl	%esi,%ecx	# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl	$31,%ecx
	ja	0f
	movl	$1,%eax
	salq	%cl,%rax
	testl	$0x27d00,%eax	# vectors 8, 10-14 and 17 push an error code
	je	0f
	popq	%r8		# get error code
0:	movq	0(%rsp),%rcx	# get ip
	movq	8(%rsp),%rdx	# get cs
	xorl	%eax,%eax
	leaq	early_idt_msg(%rip),%rdi
	call	early_printk
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	call	dump_stack
#ifdef CONFIG_KALLSYMS
	leaq	early_idt_ripmsg(%rip),%rdi
	movq	8(%rsp),%rsi	# get rip again
	call	__print_symbol
#endif
#endif /* CONFIG_EARLY_PRINTK */
1:	hlt
	jmp	1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long	0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
	.previous

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1-to-1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)		\
	i = 0 ;					\
	.rept (COUNT) ;				\
	.quad	(START) + (i << 21) + (PERM) ;	\
	i = i + 1 ;				\
	.endr

	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000 (always using
	 * 2Mbyte large pages provided by PAE mode).
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
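	/*
	 * For reference, PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)
	 * below expands to 512 pmd entries, one 2MB large page each
	 * (a sketch of the emitted quadwords, values illustrative):
	 *
	 *	.quad 0x00000000 + __PAGE_KERNEL_LARGE_EXEC	# i = 0
	 *	.quad 0x00200000 + __PAGE_KERNEL_LARGE_EXEC	# i = 1
	 *	...
	 *	.quad 0x3fe00000 + __PAGE_KERNEL_LARGE_EXEC	# i = 511
	 *
	 * covering 512 * 2MB = 1GB starting at physical address 0.
	 */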
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping.  We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_spare_pgt)
	.fill	512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align	16
	.globl	cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

/* We need valid kernel segments for data and code in long mode too;
 * IRET will check the segment types.  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout.
 */

	.section .data.page_aligned, "aw"
	.align	PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?). */

/* Descriptor encoding, e.g. __KERNEL_CS = 0x00af9b000000ffff:
 * base 0, limit 0xfffff, access byte 0x9b (present, DPL 0, code,
 * execute/read, accessed), flags 0xa (G = 1, L = 1 long mode).
 */
ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad	0,0,0			/* three TLS descriptors */
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
gdt_end:
	/* asm/segment.h:GDT_ENTRIES must match this */
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill	PAGE_SIZE / 8 - GDT_ENTRIES,8,0

	.section .bss, "aw", @nobits
	.align	L1_CACHE_BYTES
ENTRY(idt_table)
	.skip	256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align	PAGE_SIZE
ENTRY(empty_zero_page)
	.skip	PAGE_SIZE