/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fixup the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at and
	 * the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fixup the physical addresses in the page table
	 */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an Identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

	/*
	 * Fixup the kernel text+data virtual addresses.  Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
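
	/*
	 * The loop below is roughly equivalent to this C sketch (illustrative
	 * only; "delta" stands for the load offset held in %rbp):
	 *
	 *	for (i = 0; i < 512; i++)
	 *		if (level2_kernel_pgt[i] & _PAGE_PRESENT)
	 *			level2_kernel_pgt[i] += delta;
	 *
	 * i.e. walk all 512 pmd entries (4096 bytes) of level2_kernel_pgt and
	 * relocate only the entries that are marked present.
	 */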
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_SMP
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif
#ifdef CONFIG_ACPI_SLEEP
	addq	%rbp, wakeup_level4_pgt + 0(%rip)
	addq	%rbp, wakeup_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros.  Better take a jmp than relying on empty space being
	 * filled with 0x90 (nop)
	 */
	jmp secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	xorq	%rax, %rax
	btsq	$5, %rax
	btsq	$7, %rax
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_PM			1		/* protected mode */
#define CR0_MP			(1<<1)
#define CR0_ET			(1<<4)
#define CR0_NE			(1<<5)
#define CR0_WP			(1<<16)
#define CR0_AM			(1<<18)
#define CR0_PAGING		(1<<31)
	movl $CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING,%eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq init_rsp(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we are currently running on.  We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr(%rip)

	/* set up data segments. actually 0 would do too */
	movl $__KERNEL_DS,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/*
	 * Set up a dummy PDA.  This is just for some early bootup code
	 * that does in_interrupt()
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
	movq	%rax,%rdx
	shrq	$32,%rdx
	wrmsr

	/* esi is pointer to real mode structure with interesting info.
	   pass it to C */
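	/* %rdi is the first argument register in the x86_64 C calling
	   convention, so this hands real_mode_data on to the C entry
	   point reached through initial_code below. */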
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	__FINITDATA

	ENTRY(init_rsp)
	.quad  init_thread_union+THREAD_SIZE-8

bad_address:
	jmp bad_address

#ifdef CONFIG_EARLY_PRINTK
.macro early_idt_tramp first, last
	.ifgt \last-\first
	early_idt_tramp \first, \last-1
	.endif
	movl $\last,%esi
	jmp early_idt_handler
.endm

	.globl early_idt_handlers
early_idt_handlers:
	early_idt_tramp 0, 63
	early_idt_tramp 64, 127
	early_idt_tramp 128, 191
	early_idt_tramp 192, 255
#endif

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq %rcx,%r9
	xorl %r8d,%r8d		# zero for error code
	movl %esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl $31,%ecx
	ja 0f
	movl $1,%eax
	salq %cl,%rax
	testl $0x27d00,%eax
	je 0f
	popq %r8		# get error code
0:	movq 0(%rsp),%rcx	# get ip
	movq 8(%rsp),%rdx	# get cs
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz 1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 8(%rsp),%rsi	# get rip again
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long 0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)		\
	i = 0 ;					\
	.rept (COUNT) ;				\
	.quad	(START) + (i << 21) + (PERM) ;	\
	i = i + 1 ;				\
	.endr

	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
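
	/*
	 * Rough layout of init_level4_pgt below; each pgd slot covers
	 * 2^39 bytes (512GB) of virtual address space:
	 *   slot   0 -> level3_ident_pgt  (identity mapping of low memory)
	 *   slot 258 -> level3_ident_pgt  (258 << 39 sign-extends to
	 *		  0xffff810000000000, i.e. the kernel direct mapping
	 *		  at __PAGE_OFFSET)
	 *   slot 511 -> level3_kernel_pgt (kernel mapping at -2GB,
	 *		  __START_KERNEL_map)
	 */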
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 128 MB kernel mapping.  We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +128MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_spare_pgt)
	.fill	512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

/* We need valid kernel segments for data and code in long mode too.
 * IRET will check the segment types (kkeil 2000/10/28).
 * Also, sysret mandates a special GDT layout.
 */

	.section .data.page_aligned, "aw"
	.align PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */

ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad	0,0,0			/* three TLS descriptors */
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
gdt_end:
	/* asm/segment.h:GDT_ENTRIES must match this */
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip 256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE