#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declarations, a strange C thing: */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4

/*
 * Default implementation of the routine that returns the current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];
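/*
 * Illustrative sketch (not kernel code): the arrays above are indexed
 * by enum tlb_infos, so e.g. the number of 4K last-level DTLB entries
 * detected at boot can be read as:
 *
 *	u16 dtlb_4k_entries = tlb_lld_4k[ENTRIES];
 */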
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them.
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			rfu;
	char			pad0;
	char			pad1;
#else
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Compute unit id */
	u8			compute_unit_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);
extern void fpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
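/*
 * Illustrative sketch (assumed caller, not defined here): per-CPU
 * feature data is typically consumed via cpu_data()/boot_cpu_data, e.g.
 *
 *	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
 *
 *	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x15)
 *		pr_info("AMD family 15h or newer\n");
 */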
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}
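/*
 * Illustrative sketch: reading the vendor string with native_cpuid()
 * (leaf 0; ecx must be initialized because it is also an input):
 *
 *	unsigned int eax = 0, ebx, ecx = 0, edx;
 *
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 *	// ebx, edx, ecx now hold the 12-byte vendor string,
 *	// e.g. "GenuineIntel" or "AuthenticAMD"
 */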
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define	MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd; /* Control Word			*/
	u16			swd; /* Status Word			*/
	u16			twd; /* Tag Word			*/
	u16			fop; /* Last Instruction Opcode		*/
	union {
		struct {
			u64	rip; /* Instruction Pointer		*/
			u64	rdp; /* Data Pointer			*/
		};
		struct {
			u32	fip; /* FPU IP Offset			*/
			u32	fcs; /* FPU IP Selector			*/
			u32	foo; /* FPU Operand Offset		*/
			u32	fos; /* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

/* We don't support LWP yet: */
struct lwp_struct {
	u8 reserved[128];
};

struct bndreg {
	u64 lower_bound;
	u64 upper_bound;
} __packed;

struct bndcsr {
	u64 bndcfgu;
	u64 bndstatus;
} __packed;

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 xcomp_bv;
	u64 reserved[6];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	struct lwp_struct lwp;
	struct bndreg bndreg[4];
	struct bndcsr bndcsr;
	/* New processor state extensions will go here. */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

struct fpu {
	unsigned int last_cpu;
	unsigned int has_fpu;
	union thread_xstate *state;
};
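/*
 * Illustrative sketch (assumed usage; none of this is defined right
 * here): a task's extended state is reached through its struct fpu, e.g.
 *
 *	struct fpu *fpu = &current->thread.fpu;
 *	u32 mxcsr = fpu->state ? fpu->state->fxsave.mxcsr : MXCSR_DEFAULT;
 */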
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * Per-CPU IRQ handling stacks:
 */
struct irq_stack {
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */
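/*
 * Illustrative consistency checks (a sketch, not something this header
 * enforces): the layouts above must keep the canary exactly where the
 * compiler hardcodes it:
 *
 *	// 64-bit: %gs:40
 *	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
 *	// 32-bit: %gs:20
 *	BUILD_BUG_ON(offsetof(struct stack_canary, canary) != 20);
 */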
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
	/* Floating point and extended processor state */
	struct fpu		fpu;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU is used. If this is over a threshold, the
	 * lazy FPU saving becomes unlazy to save the trap. It is an
	 * unsigned char so that after 256 iterations the counter wraps
	 * and the behavior turns lazy again; this is to deal with bursty
	 * apps that only use the FPU for a short time.
	 */
	unsigned char fpu_counter;
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

typedef struct {
	unsigned long		seg;
} mm_segment_t;


/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

#define cpu_relax_lowlatency() cpu_relax()
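/*
 * Illustrative sketch: leaves with subleaves use cpuid_count(); e.g.
 * Intel's deterministic cache parameters leaf 4 takes the cache index
 * in ECX:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 *	// (eax & 0x1f) is the type of cache index 0; 0 means "no more caches"
 */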
/*
 * Stop speculative execution and prefetching of modified code.
 */
static inline void sync_core(void)
{
	int tmp;

#ifdef CONFIG_M486
	/*
	 * Do a CPUID if available, otherwise do a jump. The jump
	 * can conveniently enough be the jump around CPUID.
	 */
	asm volatile("cmpl %2,%1\n\t"
		     "jl 1f\n\t"
		     "cpuid\n"
		     "1:"
		     : "=a" (tmp)
		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#else
	/*
	 * CPUID is a barrier to speculative execution.
	 * Prefetched instructions are automatically
	 * invalidated when modified.
	 */
	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/*
 * From the system description table in the BIOS. Mostly for MCA use,
 * but others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
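/*
 * Illustrative sketch: MSR_IA32_DEBUGCTLMSR is normally updated with a
 * read-modify-write through the helpers above (the BTF branch-trap bit
 * is used here purely as an example):
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	debugctl |= DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 */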
#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							  \
	.x86_tss = {							  \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				  \
		.ss1		= __KERNEL_CS,				  \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
	 },								  \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)						\
({									\
	struct pt_regs *__regs__;					\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;							\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size: 47 bits minus one guard page. The guard
 * page is necessary on Intel CPUs: if a SYSCALL instruction is at
 * the highest possible canonical userspace address, then that
 * syscall will enter the kernel with a non-canonical return
 * address, and SYSRET will explode dangerously. We avoid this
 * particular problem by preventing anything from being mapped
 * at the maximum canonical address.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
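/*
 * Illustrative arithmetic (for reference only): with 4K pages the
 * definition above works out to
 *
 *	TASK_SIZE_MAX == (1UL << 47) - 4096 == 0x00007ffffffff000
 *
 * i.e. the very last page below the canonical boundary stays unmapped.
 */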
/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

/*
 * User space RSP while inside the SYSCALL fast path
 */
DECLARE_PER_CPU(unsigned long, old_rsp);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT(tsk)	mpx_enable_management((tsk))
#define MPX_DISABLE_MANAGEMENT(tsk)	mpx_disable_management((tsk))

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(struct task_struct *tsk);
extern int mpx_disable_management(struct task_struct *tsk);
#else
static inline int mpx_enable_management(struct task_struct *tsk)
{
	return -EINVAL;
}
static inline int mpx_disable_management(struct task_struct *tsk)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

extern u16 amd_get_nb_id(int cpu);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef	CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */