/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fixup the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fixup the physical addresses in the page table
	 */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an Identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

	/*
	 * Fixup the kernel text+data virtual addresses.  Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
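
	/*
	 * Worked example with hypothetical addresses: if the kernel was
	 * compiled to run at physical 0x200000 but a bootloader placed
	 * it at 0x1a00000, %rbp now holds the 2MB-aligned delta
	 * 0x1800000.  The loop below adds that delta to every present
	 * pmd (low bit set) in the 4096 byte level2_kernel_pgt page,
	 * i.e. up to 512 eight-byte entries.
	 */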
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_SMP
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros.  Better take a jmp than relying on empty space being
	 * filled with 0x90 (nop).
	 */
	jmp	secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	xorq	%rax, %rax
	btsq	$5, %rax		/* set CR4.PAE */
	btsq	$7, %rax		/* set CR4.PGE */
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx, %edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20, %edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_PM		1		/* protected mode */
#define CR0_MP		(1<<1)		/* monitor coprocessor */
#define CR0_ET		(1<<4)		/* extension type */
#define CR0_NE		(1<<5)		/* numeric error */
#define CR0_WP		(1<<16)		/* write protect */
#define CR0_AM		(1<<18)		/* alignment mask */
#define CR0_PAGING	(1<<31)		/* paging enable */
	movl	$CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	init_rsp(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running on.  We have to do that
	 * here because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr(%rip)

	/* set up data segments.  actually 0 would do too */
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %ss
	movl	%eax, %es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax, %fs
	movl	%eax, %gs

	/*
	 * Set up a dummy PDA.  This is just for some early bootup code
	 * that does in_interrupt().
	 */
	movl	$MSR_GS_BASE, %ecx
	movq	$empty_zero_page, %rax
	movq	%rax, %rdx
	shrq	$32, %rdx
	wrmsr
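
	/*
	 * Worked example with a hypothetical address: wrmsr takes the
	 * 64bit MSR value split across %edx:%eax, so for an
	 * empty_zero_page at 0xffffffff805e9000 the shrq above leaves
	 * 0xffffffff in %edx and 0x805e9000 in %eax.
	 */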

	/* esi is pointer to real mode structure with interesting info.
	   pass it to C */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 */
	movq	initial_code(%rip), %rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	__FINITDATA

	ENTRY(init_rsp)
	.quad	init_thread_union+THREAD_SIZE-8

bad_address:
	jmp	bad_address

	.section ".init.text", "ax"
#ifdef CONFIG_EARLY_PRINTK
	.globl	early_idt_handlers
early_idt_handlers:
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl	$i, %esi
	jmp	early_idt_handler
	i = i + 1
	.endr
#endif

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl	$2, early_recursion_flag(%rip)
	jz	1f
	incl	early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq	%rcx, %r9
	xorl	%r8d, %r8d		# zero for error code
	movl	%esi, %ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl	$31, %ecx
	ja	0f
	movl	$1, %eax
	salq	%cl, %rax
	testl	$0x27d00, %eax		# vectors 8, 10-14 and 17 push one
	je	0f
	popq	%r8			# get error code
0:	movq	0(%rsp), %rcx		# get ip
	movq	8(%rsp), %rdx		# get cs
	xorl	%eax, %eax
	leaq	early_idt_msg(%rip), %rdi
	call	early_printk
	cmpl	$2, early_recursion_flag(%rip)
	jz	1f
	call	dump_stack
#ifdef CONFIG_KALLSYMS
	leaq	early_idt_ripmsg(%rip), %rdi
	movq	8(%rsp), %rsi		# get rip again
	call	__print_symbol
#endif
#endif /* CONFIG_EARLY_PRINTK */
1:	hlt
	jmp	1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long	0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
	.previous

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)		\
	i = 0 ;					\
	.rept (COUNT) ;				\
	.quad	(START) + (i << 21) + (PERM) ;	\
	i = i + 1 ;				\
	.endr

	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
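	/*
	 * For illustration, PMDS(0, PERM, 4) would expand to
	 *	.quad 0x000000 + PERM
	 *	.quad 0x200000 + PERM
	 *	.quad 0x400000 + PERM
	 *	.quad 0x600000 + PERM
	 * i.e. one 2MB pmd per entry; the 512 entries used below cover
	 * the first 1GB.
	 */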
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping.  We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_spare_pgt)
	.fill	512,8,0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align	16
	.globl	cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1		/* GDT limit = size - 1 */
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

/* We need valid kernel segments for data and code in long mode too;
 * IRET will check the segment types.  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout.
 */

	.section .data.page_aligned, "aw"
	.align	PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */

ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad	0,0,0			/* three TLS descriptors */
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
gdt_end:
	/* asm/segment.h:GDT_ENTRIES must match this */
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill	PAGE_SIZE / 8 - GDT_ENTRIES,8,0

	.section .bss, "aw", @nobits
	.align	L1_CACHE_BYTES
ENTRY(idt_table)
	.skip	256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align	PAGE_SIZE
ENTRY(empty_zero_page)
	.skip	PAGE_SIZE
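
/*
 * Decoding the cpu_gdt_table code descriptors above: __KERNEL_CS
 * (0x00af9b000000ffff) carries flags nibble 0xa, i.e. G=1 D=0 L=1,
 * a 64bit ring 0 code segment, while __KERNEL32_CS
 * (0x00cf9b000000ffff) carries 0xc, i.e. G=1 D=1 L=0, its 32bit
 * counterpart.  The special sysret layout noted above follows from
 * sysret loading %cs and %ss at fixed offsets (+16 and +8 for 64bit
 * returns) from the selector in MSR_STAR, which the consecutive
 * __USER32_CS, __USER_DS, __USER_CS entries satisfy.
 */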