/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at and
	 * the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~LARGE_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address
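	/*
	 * %rbp now holds the physical load offset (zero when the kernel
	 * runs at its compile-time address).  It must be 2MB aligned,
	 * because the fixups below can only relocate whole 2MB pages by
	 * patching pgd/pud/pmd entries.  pgd entry 0 is the early
	 * identity mapping, entry 258 the direct mapping at
	 * __PAGE_OFFSET, and entry 511 the kernel text mapping at
	 * __START_KERNEL_map.
	 */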
	/* Fixup the physical addresses in the page table
	 */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/* Add an Identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$LARGE_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:

	/* Fixup the kernel text+data virtual addresses
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_SMP
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif
#ifdef CONFIG_ACPI_SLEEP
	addq	%rbp, wakeup_level4_pgt + 0(%rip)
	addq	%rbp, wakeup_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros.  Better take a jmp than relying on empty space being
	 * filled with 0x90 (nop).
	 */
	jmp secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	xorq	%rax, %rax
	btsq	$5, %rax
	btsq	$7, %rax
	movq	%rax, %cr4

	/* Set up the early boot 4-level page tables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3
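	/*
	 * Unlike the RIP-relative leaq's above, the movq below takes
	 * the absolute (sign-extended) address of label 1, i.e. its
	 * __START_KERNEL_map virtual address, so the indirect jump
	 * switches execution from the identity mapping onto the
	 * kernel text mapping.
	 */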
	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_PM			1		/* protected mode */
#define CR0_MP			(1<<1)
#define CR0_ET			(1<<4)
#define CR0_NE			(1<<5)
#define CR0_WP			(1<<16)
#define CR0_AM			(1<<18)
#define CR0_PAGING		(1<<31)
	movl	$CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING,%eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	init_rsp(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on.  We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr(%rip)

	/* set up data segments.  actually 0 would do too */
	movl	$__KERNEL_DS,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/*
	 * Set up a dummy PDA.  This is just for some early bootup code
	 * that does in_interrupt().
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
	movq	%rax,%rdx
	shrq	$32,%rdx
	wrmsr

	/* %esi is a pointer to the real mode structure with interesting
	 * info; pass it to C. */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set, so
	 * we make this a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq

	/* SMP bootup changes these two */
#ifndef CONFIG_HOTPLUG_CPU
	.pushsection .init.data
#endif
	.align	8
	.globl	initial_code
initial_code:
	.quad	x86_64_start_kernel
#ifndef CONFIG_HOTPLUG_CPU
	.popsection
#endif
	.globl init_rsp
init_rsp:
	.quad  init_thread_union+THREAD_SIZE-8

bad_address:
	jmp bad_address

.macro early_idt_tramp first, last
	.ifgt \last-\first
	early_idt_tramp \first, \last-1
	.endif
	movl $\last,%esi
	jmp early_idt_handler
.endm

	.globl early_idt_handlers
early_idt_handlers:
	early_idt_tramp 0, 63
	early_idt_tramp 64, 127
	early_idt_tramp 128, 191
	early_idt_tramp 192, 255

ENTRY(early_idt_handler)
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq %rcx,%r9
	xorl %r8d,%r8d		# zero for error code
	movl %esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
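	# (Mask 0x27d00 has bits 8, 10-14 and 17 set: #DF, #TS, #NP,
	# #SS, #GP, #PF and #AC are the vectors that push an error
	# code onto the stack.)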
	cmpl $31,%ecx
	ja  0f
	movl $1,%eax
	salq %cl,%rax
	testl $0x27d00,%eax
	je 0f
	popq %r8		# get error code
0:	movq 0(%rsp),%rcx	# get ip
	movq 8(%rsp),%rdx	# get cs
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 8(%rsp),%rsi	# get rip again
	call __print_symbol
#endif
1:	hlt
	jmp 1b
early_recursion_flag:
	.long 0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"

.balign PAGE_SIZE

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)		\
	i = 0 ;					\
	.rept (COUNT) ;				\
	.quad	(START) + (i << 21) + (PERM) ;	\
	i = i + 1 ;				\
	.endr

	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/* 40MB kernel mapping.  The kernel code cannot be bigger than that.
	 * If you change this, change KERNEL_TEXT_SIZE in page.h too.
	 */
	/* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
	PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, KERNEL_TEXT_SIZE/PMD_SIZE)
	/* Module mapping starts here */
	.fill	(PTRS_PER_PMD - (KERNEL_TEXT_SIZE/PMD_SIZE)),8,0

NEXT_PAGE(level2_spare_pgt)
	.fill	512,8,0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align 16
	.globl cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

/* We need valid kernel segments for data and code in long mode too.
 * IRET will check the segment types.  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout.
 */

	.section .data.page_aligned, "aw"
	.align PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
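/* Descriptor decoding aid: in the quads below, 0x00cf9b000000ffff is a
 * 4GB 32bit code segment (G and D/B set), 0x00af9b000000ffff the long
 * mode code segment (L set instead of D/B), and the access bytes
 * 0x9b/0x93/0xfb/0xf3 encode kernel code, kernel data, user code and
 * user data rights respectively.
 */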
ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad	0,0,0			/* three TLS descriptors */
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
gdt_end:
	/* asm/segment.h:GDT_ENTRIES must match this */
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip 256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE
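/* (Each long mode IDT gate descriptor is 16 bytes, hence the
 * "256 * 16" for idt_table above.  empty_zero_page also doubles as
 * the dummy PDA loaded into MSR_GS_BASE during early boot.)
 */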