#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */
struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			rfu;
	char			pad0;
	char			pad1;
#else
	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Compute unit id */
	u8			compute_unit_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);
extern void fpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}
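
/*
 * Illustrative only (not part of the kernel build): a minimal user-space
 * sketch of the same CPUID protocol native_cpuid() uses -- EAX selects the
 * leaf, ECX the sub-leaf, and leaf 0 returns the vendor string in
 * EBX:EDX:ECX.  Assumes an x86 target and GCC/Clang extended asm; the
 * demo_cpuid() helper name is made up for the example.
 */
#if 0
#include <stdio.h>
#include <string.h>

static void demo_cpuid(unsigned int *eax, unsigned int *ebx,
		       unsigned int *ecx, unsigned int *edx)
{
	/* Same constraints as native_cpuid(): EAX/ECX in, all four out. */
	asm volatile("cpuid"
	    : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}

int main(void)
{
	unsigned int eax = 0, ebx, ecx, edx;
	char vendor[13];

	demo_cpuid(&eax, &ebx, &ecx, &edx);

	/* Leaf 0: EAX = max basic leaf, EBX:EDX:ECX = "GenuineIntel" etc. */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	printf("vendor: %s, max basic leaf: %u\n", vendor, eax);
	return 0;
}
#endif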

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word */
	u32			swd;	/* FPU Status Word */
	u32			twd;	/* FPU Tag Word */
	u32			fip;	/* FPU IP Offset */
	u32			fcs;	/* FPU IP Selector */
	u32			foo;	/* FPU Operand Pointer Offset */
	u32			fos;	/* FPU Operand Pointer Selector */

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE ]: */
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd;	/* Control Word */
	u16			swd;	/* Status Word */
	u16			twd;	/* Tag Word */
	u16			fop;	/* Last Instruction Opcode */
	union {
		struct {
			u64	rip;	/* Instruction Pointer */
			u64	rdp;	/* Data Pointer */
		};
		struct {
			u32	fip;	/* FPU IP Offset */
			u32	fcs;	/* FPU IP Selector */
			u32	foo;	/* FPU Operand Offset */
			u32	fos;	/* FPU Operand Selector */
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask */

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

/* We don't support LWP yet: */
struct lwp_struct {
	u8 reserved[128];
};

struct bndregs_struct {
	u64 bndregs[8];
} __packed;

struct bndcsr_struct {
	u64 cfg_reg_u;
	u64 status_reg;
} __packed;

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 xcomp_bv;
	u64 reserved[6];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	struct lwp_struct lwp;
	struct bndregs_struct bndregs;
	struct bndcsr_struct bndcsr;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

struct fpu {
	unsigned int last_cpu;
	unsigned int has_fpu;
	union thread_xstate *state;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * per-CPU IRQ handling stacks
 */
struct irq_stack {
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
	/* floating point and extended processor state */
	struct fpu		fpu;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU is used. If this is over a threshold, the
	 * lazy FPU saving becomes unlazy to save the trap. This is an
	 * unsigned char so that after 256 times the counter wraps and the
	 * behavior turns lazy again; this is to deal with bursty apps that
	 * only use the FPU for a short time.
	 */
	unsigned char fpu_counter;
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}

typedef struct {
	unsigned long		seg;
} mm_segment_t;


/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

#define cpu_relax_lowlatency() cpu_relax()
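
/*
 * Illustrative only (not part of the kernel build): a minimal user-space
 * sketch of the busy-wait pattern cpu_relax() is meant for -- spin on a
 * flag and execute PAUSE ("rep; nop") in the loop body so a spinning
 * hyperthread yields pipeline resources.  Assumes an x86 target, GCC/Clang,
 * and POSIX threads; the setter()/flag names are made up for the example.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int flag;

static void *setter(void *arg)
{
	(void)arg;
	atomic_store_explicit(&flag, 1, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, setter, NULL);

	/* The busy-wait loop: PAUSE on every iteration, like cpu_relax(). */
	while (!atomic_load_explicit(&flag, memory_order_acquire))
		asm volatile("rep; nop" ::: "memory");

	pthread_join(t, NULL);
	printf("flag observed\n");
	return 0;
}
#endif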

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#ifdef CONFIG_M486
	/*
	 * Do a CPUID if available, otherwise do a jump. The jump
	 * can conveniently enough be the jump around CPUID.
	 */
	asm volatile("cmpl %2,%1\n\t"
		     "jl 1f\n\t"
		     "cpuid\n"
		     "1:"
		     : "=a" (tmp)
		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#else
	/*
	 * CPUID is a barrier to speculative execution.
	 * Prefetched instructions are automatically
	 * invalidated when modified.
	 */
	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
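
/*
 * Illustrative only (not part of the kernel build): outside the kernel the
 * same prefetch hints are usually emitted with GCC/Clang's
 * __builtin_prefetch, where the second argument selects read (0) vs.
 * write (1) intent -- roughly what prefetch()/prefetchw() above request via
 * alternative_input().  The walk_list() helper and struct node are made-up
 * names for the example.
 */
#if 0
#include <stddef.h>

struct node {
	struct node *next;
	long payload;
};

static long walk_list(const struct node *n)
{
	long sum = 0;

	while (n) {
		/* Hint the next element while processing this one. */
		if (n->next)
			__builtin_prefetch(n->next, 0, 1);
		sum += n->payload;
		n = n->next;
	}
	return sum;
}
#endif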

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {						\
	.sp0		= sizeof(init_stack) + (long)&init_stack,	\
	.vm86_info	= NULL,						\
	.sysenter_cs	= __KERNEL_CS,					\
	.io_bitmap_ptr	= NULL,						\
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							\
	.x86_tss = {							\
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				\
		.ss1		= __KERNEL_CS,				\
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\
	 },								\
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	\
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)						\
({									\
	struct pt_regs *__regs__;					\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;							\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

/*
 * User space RSP while inside the SYSCALL fast path
 */
DECLARE_PER_CPU(unsigned long, old_rsp);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern u16 amd_get_nb_id(int cpu);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */
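
/*
 * Illustrative only (not part of the kernel build): a minimal user-space
 * sketch of the scan hypervisor_cpuid_base() performs -- walk the hypervisor
 * CPUID range 0x40000000..0x4000ffff in 0x100 steps and compare the 12-byte
 * signature returned in EBX:ECX:EDX.  Assumes an x86 guest and GCC/Clang;
 * the demo_cpuid()/find_hypervisor_base() names are made up, and
 * "KVMKVMKVM\0\0\0" is just one example signature.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void demo_cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
		       uint32_t *c, uint32_t *d)
{
	asm volatile("cpuid"
	    : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
	    : "0" (leaf), "2" (0));
}

static uint32_t find_hypervisor_base(const char *sig)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		demo_cpuid(base, &eax, &signature[0], &signature[1],
			   &signature[2]);
		if (!memcmp(sig, signature, 12))
			return base;
	}
	return 0;
}

int main(void)
{
	uint32_t base = find_hypervisor_base("KVMKVMKVM\0\0\0");

	printf("KVM CPUID base: %#x\n", base);
	return 0;
}
#endif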