#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declarations, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/ds.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
	char			hard_math;
	char			rfu;
	char			fdiv_bug;
	char			f00f_bug;
	char			coma_bug;
	char			pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_var_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
#ifdef CONFIG_SMP
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
#endif
	unsigned int		x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

#define X86_HYPER_VENDOR_NONE	0
#define X86_HYPER_VENDOR_VMWARE	1

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	__get_cpu_var(cpu_info)
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
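
/*
 * Illustrative usage sketch, not part of the original header: cpu_data()
 * above gives access to the cached cpuinfo_x86 of another CPU (falling back
 * to boot_cpu_data on UP).  The helper name is hypothetical and only
 * demonstrates the macro.
 */
static inline int example_cpu_is_intel(int cpu)
{
	return cpu_data(cpu).x86_vendor == X86_VENDOR_INTEL;
}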

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word */
	u32			swd;	/* FPU Status Word */
	u32			twd;	/* FPU Tag Word */
	u32			fip;	/* FPU IP Offset */
	u32			fcs;	/* FPU IP Selector */
	u32			foo;	/* FPU Operand Pointer Offset */
	u32			fos;	/* FPU Operand Pointer Selector */

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd;	/* Control Word */
	u16			swd;	/* Status Word */
	u16			twd;	/* Tag Word */
	u16			fop;	/* Last Instruction Opcode */
	union {
		struct {
			u64	rip;	/* Instruction Pointer */
			u64	rdp;	/* Data Pointer */
		};
		struct {
			u32	fip;	/* FPU IP Offset */
			u32	fcs;	/* FPU IP Selector */
			u32	foo;	/* FPU Operand Offset */
			u32	fos;	/* FPU Operand Selector */
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask */

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};
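
/*
 * Illustrative usage sketch, not part of the original header: the union
 * above is interpreted according to the FPU save mechanism in use.  A
 * hypothetical reader of the state could pick the view like this;
 * cpu_has_fxsr/cpu_has_xsave come from <asm/cpufeature.h>, which is
 * already included near the top of this file.
 */
static inline unsigned int example_xstate_cwd(const union thread_xstate *xs)
{
	/* The FPU control word sits at offset 0 in every view. */
	if (cpu_has_xsave || cpu_has_fxsr)
		return xs->fxsave.cwd;

	return xs->fsave.cwd;
}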

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif	/* X86_64 */
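
/*
 * Illustrative sketch, not part of the original header: the %gs:40 layout
 * assumption documented above can be spelled out as a compile-time check.
 * BUILD_BUG_ON() is assumed to come from <linux/kernel.h>, which callers of
 * this header normally have; the helper name is hypothetical.
 */
#ifdef CONFIG_X86_64
static inline void example_check_canary_layout(void)
{
	/* GCC expects the canary at %gs:40, i.e. offset 40 into the union. */
	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
}
#endif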

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_no;
	unsigned long		error_code;
	/* floating point and extended processor state */
	union thread_xstate	*xstate;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
	/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
	unsigned long		debugctlmsr;
	/* Debug Store context; see asm/ds.h */
	struct ds_context	*ds_ctx;
};

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" ::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1" ::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2" ::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3" ::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6" ::"r" (value));
		break;
	case 7:
		asm("mov %0, %%db7" ::"r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)			\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)			\
	native_set_debugreg(register, value)

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us can
 * get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
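
/*
 * Illustrative usage sketch, not part of the original header: a feature
 * bit is recorded in mmu_cr4_features and set in %cr4 in one call.
 * X86_CR4_OSFXSR comes from <asm/processor-flags.h>, included at the top
 * of this file; the wrapper name is hypothetical.
 */
static inline void example_enable_osfxsr(void)
{
	set_in_cr4(X86_CR4_OSFXSR);	/* allow FXSAVE/FXRSTOR and SSE */
}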

typedef struct {
	unsigned long		seg;
} mm_segment_t;

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
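
/*
 * Illustrative usage sketch, not part of the original header: leaf 0
 * returns the maximum basic leaf in EAX and the 12-byte vendor string in
 * EBX:EDX:ECX.  The helper below is hypothetical and only shows how the
 * cpuid() wrapper above is typically called.
 */
static inline unsigned int example_cpuid_vendor(unsigned int vendor[3])
{
	unsigned int max_leaf;

	/* vendor[] receives EBX, EDX, ECX in that (string) order. */
	cpuid(0, &max_leaf, &vendor[0], &vendor[2], &vendor[1]);

	return max_leaf;
}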

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
	if (boot_cpu_data.x86 < 5)
		/* There is no speculative execution.
		 * jmp is a barrier to prefetching. */
		asm volatile("jmp 1f\n1:\n" ::: "memory");
	else
#endif
		/* cpuid is a barrier to speculative execution.
		 * Prefetched instructions are automatically
		 * invalidated when modified. */
		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
			     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
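
/*
 * Illustrative usage sketch, not part of the original header: the canonical
 * monitor/mwait sequence the two wrappers above encode.  EAX=0 requests the
 * shallowest (C1) state and ECX=0 asks for no extensions; the helper and its
 * argument are hypothetical.
 */
static inline void example_mwait_c1(const void *monitor_addr)
{
	/* Arm the monitor on the cache line, then wait for a write to it. */
	__monitor(monitor_addr, 0, 0);
	__mwait(0, 0);
}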

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern unsigned long		idle_halt;
extern unsigned long		idle_nomwait;

/*
 * On systems with caches, the caches must be flushed as the absolute
 * last instruction before going into a suspended halt.  Otherwise,
 * dirty data can linger in the cache and become stale on resume,
 * leading to strange errors.
 *
 * Perform a variety of operations to guarantee that the compiler
 * will not reorder instructions.  wbinvd itself is serializing
 * so the processor will not reorder.
 *
 * Systems without cache can just go into halt.
 */
static inline void wbinvd_halt(void)
{
	mb();
	/* check for clflush to determine if wbinvd is legal */
	if (cpu_has_clflush)
		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
	else
		while (1)
			halt();
}

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline unsigned long get_debugctlmsr_on_cpu(int cpu)
{
	u64 debugctlmsr = 0;
	u32 val1, val2;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR, &val1, &val2);
	debugctlmsr = val1 | ((u64)val2 << 32);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

static inline void update_debugctlmsr_on_cpu(int cpu,
					      unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR,
		     (u32)((u64)debugctlmsr),
		     (u32)((u64)debugctlmsr >> 32));
}

/*
 * From the system description table in the BIOS.  Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
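
/*
 * Illustrative usage sketch, not part of the original header: the classic
 * use of prefetch() is to pull in the next node of a linked structure while
 * the current one is being processed.  The node type and walker below are
 * hypothetical and exist only to demonstrate the helper.
 */
struct example_node {
	struct example_node	*next;
	unsigned long		payload;
};

static inline unsigned long example_prefetching_walk(struct example_node *p)
{
	unsigned long sum = 0;

	for (; p; p = p->next) {
		prefetch(p->next);	/* overlap the next miss with work */
		sum += p->payload;
	}

	return sum;
}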

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {						\
	.sp0		= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info	= NULL,					\
	.sysenter_cs	= __KERNEL_CS,				\
	.io_bitmap_ptr	= NULL,					\
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {						\
	.x86_tss = {						\
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,			\
		.ss1		= __KERNEL_CS,			\
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,	\
	},							\
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	\
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)					\
({								\
	struct pt_regs *__regs__;				\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;						\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size.  47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern int amd_get_nb_id(int cpu);

struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}

#define APERFMPERF_SHIFT 10

static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}

#endif /* _ASM_X86_PROCESSOR_H */