/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of the helper that returns the current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
        void *pc;

        asm volatile("mov $1f, %0; 1:":"=r" (pc));

        return pc;
}

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
        ENTRIES,
        NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];
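
/*
 * Illustrative sketch of how these are consumed (the index comes from the
 * tlb_infos enum above); e.g. the number of 4K data-TLB entries detected
 * on the boot CPU:
 *
 *	unsigned int dtlb_4k_entries = tlb_lld_4k[ENTRIES];
 */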

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head_32.S, so think twice
 * before touching them.
 */

struct cpuinfo_x86 {
        __u8 x86;		/* CPU family */
        __u8 x86_vendor;	/* CPU vendor */
        __u8 x86_model;
        __u8 x86_mask;
#ifdef CONFIG_X86_64
        /* Number of 4K pages in DTLB/ITLB combined (in pages): */
        int x86_tlbsize;
#endif
        __u8 x86_virt_bits;
        __u8 x86_phys_bits;
        /* CPUID returned core id bits: */
        __u8 x86_coreid_bits;
        __u8 cu_id;
        /* Max extended CPUID function supported: */
        __u32 extended_cpuid_level;
        /* Maximum supported CPUID level, -1=no CPUID: */
        int cpuid_level;
        __u32 x86_capability[NCAPINTS + NBUGINTS];
        char x86_vendor_id[16];
        char x86_model_id[64];
        /* in KB - valid for CPUs which support this call: */
        int x86_cache_size;
        int x86_cache_alignment;	/* In bytes */
        /* Cache QoS architectural values: */
        int x86_cache_max_rmid;		/* max index */
        int x86_cache_occ_scale;	/* scale to bytes */
        int x86_power;
        unsigned long loops_per_jiffy;
        /* cpuid returned max cores value: */
        u16 x86_max_cores;
        u16 apicid;
        u16 initial_apicid;
        u16 x86_clflush_size;
        /* number of cores as seen by the OS: */
        u16 booted_cores;
        /* Physical processor id: */
        u16 phys_proc_id;
        /* Logical processor id: */
        u16 logical_proc_id;
        /* Core id: */
        u16 cpu_core_id;
        /* Index into per_cpu list: */
        u16 cpu_index;
        u32 microcode;
} __randomize_layout;

struct cpuid_regs {
        u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
        CPUID_EAX = 0,
        CPUID_EBX,
        CPUID_ECX,
        CPUID_EDX,
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;

extern struct tss_struct doublefault_tss;
extern __u32 cpu_caps_cleared[NCAPINTS];
extern __u32 cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern u32 get_scattered_cpuid_leaf(unsigned int level,
				    unsigned int sub_leaf,
				    enum cpuid_regs_idx reg);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
        return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm volatile("cpuid"
            : "=a" (*eax),
              "=b" (*ebx),
              "=c" (*ecx),
              "=d" (*edx)
            : "0" (*eax), "2" (*ecx)
            : "memory");
}
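
/*
 * Usage sketch: leaf 0 reports the highest standard leaf in EAX and the
 * vendor string spread across EBX, EDX, ECX ("GenuineIntel",
 * "AuthenticAMD", ...).  The variable names below are only illustrative:
 *
 *	unsigned int eax = 0, ebx, ecx = 0, edx;
 *	char vendor[13];
 *
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 *	memcpy(vendor + 0, &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = '\0';
 *
 * Most callers should use the cpuid()/cpuid_count() wrappers further down,
 * which also pre-clear ECX.
 */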

#define native_cpuid_reg(reg)					\
static inline unsigned int native_cpuid_##reg(unsigned int op)	\
{								\
        unsigned int eax = op, ebx, ecx = 0, edx;		\
								\
        native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
        return reg;						\
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
        return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
        return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
        write_cr3(__sme_pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
        unsigned short back_link, __blh;
        unsigned long sp0;
        unsigned short ss0, __ss0h;
        unsigned long sp1;

        /*
         * We don't use ring 1, so ss1 is a convenient scratch space in
         * the same cacheline as sp0. We use ss1 to cache the value in
         * MSR_IA32_SYSENTER_CS. When we context switch
         * MSR_IA32_SYSENTER_CS, we first check if the new value being
         * written matches ss1, and, if it's not, then we wrmsr the new
         * value and update ss1.
         *
         * The only reason we context switch MSR_IA32_SYSENTER_CS is
         * that we set it to zero in vm86 tasks to avoid corrupting the
         * stack if we were to go through the sysenter path from vm86
         * mode.
         */
        unsigned short ss1;	/* MSR_IA32_SYSENTER_CS */

        unsigned short __ss1h;
        unsigned long sp2;
        unsigned short ss2, __ss2h;
        unsigned long __cr3;
        unsigned long ip;
        unsigned long flags;
        unsigned long ax;
        unsigned long cx;
        unsigned long dx;
        unsigned long bx;
        unsigned long sp;
        unsigned long bp;
        unsigned long si;
        unsigned long di;
        unsigned short es, __esh;
        unsigned short cs, __csh;
        unsigned short ss, __ssh;
        unsigned short ds, __dsh;
        unsigned short fs, __fsh;
        unsigned short gs, __gsh;
        unsigned short ldt, __ldth;
        unsigned short trace;
        unsigned short io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
        u32 reserved1;
        u64 sp0;
        u64 sp1;
        u64 sp2;
        u64 reserved2;
        u64 ist[7];
        u32 reserved3;
        u32 reserved4;
        u16 reserved5;
        u16 io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
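
/*
 * Rough arithmetic behind the constants above: 65536 ports at one bit each
 * is 65536/8 = 8192 bytes, i.e. 1024 longs on 64-bit (2048 on 32-bit).
 * INVALID_IO_BITMAP_OFFSET (0x8000) deliberately points past the TSS
 * limit, so a task using it is granted no I/O ports at all: any
 * unprivileged IN/OUT raises #GP instead of consulting the bitmap.
 */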

struct tss_struct {
        /*
         * The hardware state:
         */
        struct x86_hw_tss x86_tss;

        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long io_bitmap[IO_BITMAP_LONGS + 1];

#ifdef CONFIG_X86_32
        /*
         * Space for the temporary SYSENTER stack.
         */
        unsigned long SYSENTER_stack_canary;
        unsigned long SYSENTER_stack[64];
#endif

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);

/*
 * sizeof(unsigned long) coming from an extra "long" at the end
 * of the iobitmap.
 *
 * -1? seg base+limit should be pointing to the address of the
 * last valid byte
 */
#define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
        unsigned long ist[7];
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
        char irq_stack[IRQ_STACK_SIZE];
        /*
         * GCC hardcodes the stack canary as %gs:40. Since the
         * irq_stack is the object at %gs:0, we reserve the bottom
         * 48 bytes of the irq stack for the canary.
         */
        struct {
                char gs_base[40];
                unsigned long stack_canary;
        };
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
        char __pad[20];		/* canary at %gs:20 */
        unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * per-CPU IRQ handling stacks
 */
struct irq_stack {
        u32 stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */
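
/*
 * Sketch of what the canary offsets above correspond to: with
 * -fstack-protector the compiler-generated prologue loads the canary from
 * a fixed offset relative to the per-CPU segment, roughly
 *
 *	movq %gs:40, %rax	# 64-bit: irq_stack_union.stack_canary
 *	movl %gs:20, %eax	# 32-bit: struct stack_canary.canary
 *
 * hence the 40 bytes of gs_base padding in irq_stack_union and the 20-byte
 * __pad in struct stack_canary.
 */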

extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;

struct perf_event;

typedef struct {
        unsigned long seg;
} mm_segment_t;

struct thread_struct {
        /* Cached TLS descriptors: */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
        unsigned long sp0;
#endif
        unsigned long sp;
#ifdef CONFIG_X86_32
        unsigned long sysenter_cs;
#else
        unsigned short es;
        unsigned short ds;
        unsigned short fsindex;
        unsigned short gsindex;
#endif

        u32 status;		/* thread synchronous flags */

#ifdef CONFIG_X86_64
        unsigned long fsbase;
        unsigned long gsbase;
#else
        /*
         * XXX: this could presumably be unsigned short. Alternatively,
         * 32-bit kernels could be taught to use fsindex instead.
         */
        unsigned long fs;
        unsigned long gs;
#endif

        /* Save middle states of ptrace breakpoints */
        struct perf_event *ptrace_bps[HBP_NUM];
        /* Debug status used for traps, single steps, etc... */
        unsigned long debugreg6;
        /* Keep track of the exact dr7 value set by the user */
        unsigned long ptrace_dr7;
        /* Fault info: */
        unsigned long cr2;
        unsigned long trap_nr;
        unsigned long error_code;
#ifdef CONFIG_VM86
        /* Virtual 86 mode info */
        struct vm86 *vm86;
#endif
        /* IO permissions: */
        unsigned long *io_bitmap_ptr;
        unsigned long iopl;
        /* Max allowed port in the bitmap, in bytes: */
        unsigned io_bitmap_max;

        mm_segment_t addr_limit;

        unsigned int sig_on_uaccess_err:1;
        unsigned int uaccess_err:1;	/* uaccess failed */

        /* Floating point and extended processor state */
        struct fpu fpu;
        /*
         * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
         * the end.
         */
};

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT) */

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
        unsigned int reg;

        asm volatile ("pushfl;"
                      "popl %0;"
                      "andl %1, %0;"
                      "orl %2, %0;"
                      "pushl %0;"
                      "popfl"
                      : "=&r" (reg)
                      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(unsigned long sp0)
{
        this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
        asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
#ifdef CONFIG_X86_64
        return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
#else
        /* sp0 on x86_32 is special in and around vm86 mode. */
        return this_cpu_read_stable(cpu_current_top_of_stack);
#endif
}

static inline bool on_thread_stack(void)
{
        return (unsigned long)(current_top_of_stack() -
                               current_stack_pointer) < THREAD_SIZE;
}
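
/*
 * on_thread_stack() is a single-compare range check that relies on
 * unsigned wrap-around: the thread stack occupies (top - THREAD_SIZE, top],
 * so for a stack pointer inside it the difference is small, while anything
 * outside yields a huge unsigned value.  A made-up example, assuming a 16K
 * THREAD_SIZE:
 *
 *	top = 0xffffc90000008000, sp = 0xffffc90000005f80
 *		-> top - sp = 0x2080 (8320) < 16384: on the thread stack
 *	top = 0xffffc90000008000, sp on some other (e.g. IRQ) stack
 *		-> top - sp is far larger than 16384 (or wraps): not on it
 */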

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid		native_cpuid

static inline void load_sp0(unsigned long sp0)
{
        native_load_sp0(sp0);
}

#define set_iopl_mask	native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = count;
        __cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return edx;
}
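
/*
 * Usage sketch for the helpers above: some leaves are indexed by ECX, e.g.
 * leaf 4 describes one cache level per sub-leaf until the cache type in
 * EAX[4:0] reads 0 (variable names are illustrative only):
 *
 *	unsigned int eax, ebx, ecx, edx, i = 0;
 *
 *	do {
 *		cpuid_count(4, i++, &eax, &ebx, &ecx, &edx);
 *	} while (eax & 0x1f);
 *
 * and cpuid_eax(0x80000008) & 0xff is the classic single-datum case: the
 * number of physical address bits.
 */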

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
{
        asm volatile("rep; nop" ::: "memory");
}

static __always_inline void cpu_relax(void)
{
        rep_nop();
}

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 * a) Text was modified using one virtual address and is about to be executed
 *    from the same physical page at a different virtual address.
 *
 * b) Text was modified on a different CPU, may subsequently be
 *    executed on this CPU, and you want to make sure the new version
 *    gets executed.  This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
        /*
         * There are quite a few ways to do this.  IRET-to-self is nice
         * because it works on every CPU, at any CPL (so it's compatible
         * with paravirtualization), and it never exits to a hypervisor.
         * The only down sides are that it's a bit slow (it seems to be
         * a bit more than 2x slower than the fastest options) and that
         * it unmasks NMIs.  The "push %cs" is needed because, in
         * paravirtual environments, __KERNEL_CS may not be a valid CS
         * value when we do IRET directly.
         *
         * In case NMI unmasking or performance ever becomes a problem,
         * the next best option appears to be MOV-to-CR2 and an
         * unconditional jump.  That sequence also works on all CPUs,
         * but it will fault at CPL3 (i.e. Xen PV).
         *
         * CPUID is the conventional way, but it's nasty: it doesn't
         * exist on some 486-like CPUs, and it usually exits to a
         * hypervisor.
         *
         * Like all of Linux's memory ordering operations, this is a
         * compiler barrier as well.
         */
#ifdef CONFIG_X86_32
        asm volatile (
                "pushfl\n\t"
                "pushl %%cs\n\t"
                "pushl $1f\n\t"
                "iret\n\t"
                "1:"
                : ASM_CALL_CONSTRAINT : : "memory");
#else
        unsigned int tmp;

        asm volatile (
                UNWIND_HINT_SAVE
                "mov %%ss, %0\n\t"
                "pushq %q0\n\t"
                "pushq %%rsp\n\t"
                "addq $8, (%%rsp)\n\t"
                "pushfq\n\t"
                "mov %%cs, %0\n\t"
                "pushq %q0\n\t"
                "pushq $1f\n\t"
                "iretq\n\t"
                UNWIND_HINT_RESTORE
                "1:"
                : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
        unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return 0;
#endif
        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

        return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return;
#endif
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int bootloader_type;
extern int bootloader_version;

extern char ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3DNow! prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
        alternative_input(BASE_PREFETCH, "prefetchnta %P1",
                          X86_FEATURE_XMM,
                          "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
        alternative_input(BASE_PREFETCH, "prefetchw %P1",
                          X86_FEATURE_3DNOWPREFETCH,
                          "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
        prefetchw(x);
}
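
/*
 * Illustrative use of prefetchw(): issue it when a cache line is about to
 * be written, so it arrives in an exclusive state rather than shared and
 * the store skips the shared->modified upgrade.  'node' below is a made-up
 * example structure:
 *
 *	if (node->next)
 *		prefetchw(node->next);
 *	node->count++;
 */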

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define IA32_PAGE_OFFSET	PAGE_OFFSET
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_LOW		TASK_SIZE
#define TASK_SIZE_MAX		TASK_SIZE
#define DEFAULT_MAP_WINDOW	TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {						\
	.sp0		= TOP_OF_INIT_STACK,			\
	.sysenter_cs	= __KERNEL_CS,				\
	.io_bitmap_ptr	= NULL,					\
	.addr_limit	= KERNEL_DS,				\
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size.  47bits minus one guard page.  The guard
 * page is necessary on Intel CPUs: if a SYSCALL instruction is at
 * the highest possible canonical userspace address, then that
 * syscall will enter the kernel with a non-canonical return
 * address, and SYSRET will explode dangerously.  We avoid this
 * particular problem by preventing anything from being mapped
 * at the maximum canonical address.
 */
#define TASK_SIZE_MAX	((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)

#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE_LOW		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : DEFAULT_MAP_WINDOW)
#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE_LOW
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  {						\
	.addr_limit	= KERNEL_DS,				\
}

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */
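
/*
 * Worked example for the 64-bit limits above, assuming 4-level paging
 * (__VIRTUAL_MASK_SHIFT == 47):
 *
 *	TASK_SIZE_MAX = (1UL << 47) - PAGE_SIZE
 *		      = 0x0000800000000000 - 0x1000
 *		      = 0x00007ffffffff000
 *
 * i.e. 128 TiB minus the one guard page, and DEFAULT_MAP_WINDOW evaluates
 * to the same value, so nothing is ever mapped at the top page where a
 * SYSCALL/SYSRET pair could misbehave.
 */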

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT()		mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
        return -EINVAL;
}
static inline int mpx_disable_management(void)
{
        return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

#ifdef CONFIG_CPU_SUP_AMD
extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
#else
static inline u16 amd_get_nb_id(int cpu)		{ return 0; }
static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
#endif

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
        uint32_t base, eax, signature[3];

        for (base = 0x40000000; base < 0x40010000; base += 0x100) {
                cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

                if (!memcmp(sig, signature, 12) &&
                    (leaves == 0 || ((eax - base) >= leaves)))
                        return base;
        }

        return 0;
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */