/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
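/*
 * With the standard 4-level x86-64 layout these reduce to fixed slots;
 * for example, taking __START_KERNEL_map = 0xffffffff80000000:
 *
 *	pgd_index: (0xffffffff80000000 >> 39) & 511 = 511
 *	pud_index: (0xffffffff80000000 >> 30) & 511 = 510
 *
 * matching the 511/510 arithmetic comments on the kernel page tables
 * further down in this file.
 */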

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	/* Sanitize CPU configuration */
	call	verify_cpu

	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	movq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp	1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	movq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Set up the early boot 4-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we are currently running at. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses the
	 * init data section until the per-CPU areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

	/*
	 * %rsi is a pointer to the real mode structure with interesting
	 * info; pass it to C.
	 */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need %cs to be set properly,
	 * so we make this a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
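	/*
	 * Stack picture just before the lretq below (top of stack first):
	 *
	 *	initial_code	popped into %rip by lretq
	 *	__KERNEL_CS	popped into %cs by lretq
	 *	.Lafter_lret	fake return address, left for the unwinder
	 */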
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorq	%rbp, %rbp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
ENDPROC(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	/*
	 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
	 * unwinder reliably detect the end of the stack.
	 */
	.quad	init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
	__FINITDATA

bad_address:
	jmp	bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_iret
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1-to-1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
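/*
 * For illustration, with a hypothetical permission mask "perm",
 * PMDS(0, perm, 2) expands to
 *
 *	.quad (0) + (0 << PMD_SHIFT) + (perm)
 *	.quad (0) + (1 << PMD_SHIFT) + (perm)
 *
 * i.e. two consecutive 2MB physical frames mapped with identical
 * permission bits, as used for the identity and kernel mappings below.
 */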

	__INITDATA
NEXT_PAGE(early_top_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_top_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
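/*
 * Note: empty_zero_page above is the kernel's shared page of zeroes;
 * it is exported so that modules can reference it through helpers such
 * as ZERO_PAGE().
 */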