/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declarations, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head_32.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_stepping;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
	__u32			vmx_capability[NVMXINTS];
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	__u8			cu_id;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	/*
	 * Align to size of unsigned long because the x86_capability array
	 * is passed to bitops which require the alignment. Use unnamed
	 * union to enforce the array is aligned to size of unsigned long.
	 */
	union {
		__u32		x86_capability[NCAPINTS + NBUGINTS];
		unsigned long	x86_capability_alignment;
	};
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUS which support this call: */
	unsigned int		x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values, valid only on the BSP: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_cache_mbm_width_offset;
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* protected processor identification number */
	u64			ppin;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	u16			cpu_die_id;
	u16			logical_die_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	/* Is SMT active on this core? */
	bool			smt_active;
	u32			microcode;
	/* Address space bits used by the cache internally */
	u8			x86_cache_bits;
	unsigned		initialized : 1;
} __randomize_layout;

struct cpuid_regs {
	u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
	CPUID_EAX = 0,
	CPUID_EBX,
	CPUID_ECX,
	CPUID_EDX,
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_HYGON	9
#define X86_VENDOR_ZHAOXIN	10
#define X86_VENDOR_VORTEX	11
#define X86_VENDOR_NUM		12

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
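
/*
 * Illustrative arithmetic only (the numbers below are an example, not part
 * of this header): with x86_cache_bits == 46 and PAGE_SHIFT == 12 (4 KiB
 * pages), l1tf_pfn_limit() returns BIT_ULL(46 - 1 - 12) == 2^33 page
 * frames, i.e. half of the cache-addressable physical space expressed in
 * pages.
 */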

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

#define native_cpuid_reg(reg)					\
static inline unsigned int native_cpuid_##reg(unsigned int op)	\
{								\
	unsigned int eax = op, ebx, ecx = 0, edx;		\
								\
	native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
	return reg;						\
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0. We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS. When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;

	/*
	 * Since Linux does not use ring 2, the 'sp2' slot is unused by
	 * hardware. entry_SYSCALL_64 uses it as scratch space to stash
	 * the user RSP value.
	 */
	u64			sp2;

	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS		65536
#define IO_BITMAP_BYTES		(IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS		(IO_BITMAP_BYTES / sizeof(long))

#define IO_BITMAP_OFFSET_VALID_MAP				\
	(offsetof(struct tss_struct, io_bitmap.bitmap) -	\
	 offsetof(struct tss_struct, x86_tss))

#define IO_BITMAP_OFFSET_VALID_ALL				\
	(offsetof(struct tss_struct, io_bitmap.mapall) -	\
	 offsetof(struct tss_struct, x86_tss))

#ifdef CONFIG_X86_IOPL_IOPERM
/*
 * sizeof(unsigned long) coming from an extra "long" at the end of the
 * iobitmap. The limit is inclusive, i.e. the last valid byte.
 */
# define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
	 sizeof(unsigned long) - 1)
#else
# define __KERNEL_TSS_LIMIT	\
	(offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
#endif

/* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
#define IO_BITMAP_OFFSET_INVALID	(__KERNEL_TSS_LIMIT + 1)

struct entry_stack {
	char	stack[PAGE_SIZE];
};

struct entry_stack_page {
	struct entry_stack stack;
} __aligned(PAGE_SIZE);

/*
 * All IO bitmap related data stored in the TSS:
 */
struct x86_io_bitmap {
	/* The sequence number of the last active bitmap. */
	u64			prev_sequence;

	/*
	 * Store the dirty size of the last io bitmap offender. The next
	 * one will have to do the cleanup as the switch out to a non io
	 * bitmap user will just set x86_tss.io_bitmap_base to a value
	 * outside of the TSS limit. So for sane tasks there is no need
	 * to actually touch the io_bitmap at all.
	 */
	unsigned int		prev_max;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * Special I/O bitmap to emulate IOPL(3). All bytes zero,
	 * except the additional byte at the end.
	 */
	unsigned long		mapall[IO_BITMAP_LONGS + 1];
};

struct tss_struct {
	/*
	 * The fixed hardware portion. This must not cross a page boundary
	 * at risk of violating the SDM's advice and potentially triggering
	 * errata.
	 */
	struct x86_hw_tss	x86_tss;

	struct x86_io_bitmap	io_bitmap;
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
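
/*
 * For reference, illustrative sizes only (assuming 64-bit longs and
 * BITS_PER_BYTE == 8; these numbers are not used by the code):
 * IO_BITMAP_BYTES is 65536 / 8 == 8192 and IO_BITMAP_LONGS is
 * 8192 / 8 == 1024, so bitmap[] and mapall[] above are each 1025 longs
 * (8200 bytes), including the extra long that holds the required
 * trailing byte.
 */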

/* Per CPU interrupt stacks */
struct irq_stack {
	char		stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);

DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);

#ifdef CONFIG_X86_64
struct fixed_percpu_data {
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 *
	 * Once we are willing to require -mstack-protector-guard-symbol=
	 * support for x86_64 stackprotector, we can get rid of this.
	 */
	char		gs_base[40];
	unsigned long	stack_canary;
};

DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}

DECLARE_PER_CPU(void *, hardirq_stack_ptr);
DECLARE_PER_CPU(bool, hardirq_stack_inuse);
extern asmlinkage void ignore_sysret(void);

/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
#else	/* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
#endif
DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
#endif	/* !X86_64 */

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
	unsigned long		sp0;
#endif
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short. Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long		fs;
	unsigned long		gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		virtual_dr6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	struct io_bitmap	*io_bitmap;

	/*
	 * IOPL. Privilege level dependent I/O permission which is
	 * emulated via the I/O bitmap to prevent user space from disabling
	 * interrupts.
	 */
	unsigned long		iopl_emul;

	unsigned int		iopl_warn:1;
	unsigned int		sig_on_uaccess_err:1;

	/*
	 * Protection Keys Register for Userspace. Loaded immediately on
	 * context switch. Store it in thread_struct to avoid a lookup in
	 * the task's FPU xstate buffer. This value is only valid when a
	 * task is scheduled out. For 'current' the authoritative source of
	 * PKRU is the hardware itself.
	 */
	u32			pkru;

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
	 * the end.
	 */
};

extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	fpu_thread_struct_whitelist(offset, size);
}

static inline void
native_load_sp0(unsigned long sp0)
{
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static __always_inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
	/*
	 * We can't read directly from tss.sp0: sp0 on x86_32 is special in
	 * and around vm86 mode and sp0 on x86_64 is special because of the
	 * entry trampoline.
	 */
	return this_cpu_read_stable(cpu_current_top_of_stack);
}

static inline bool on_thread_stack(void)
{
	return (unsigned long)(current_top_of_stack() -
			       current_stack_pointer) < THREAD_SIZE;
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid

static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}

#endif /* CONFIG_PARAVIRT_XXL */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long __get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
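
/*
 * Usage sketch for the wrappers above (illustrative only, not part of this
 * header; the variable names are made up):
 *
 *	unsigned int max_leaf, vendor[3];
 *
 *	max_leaf = cpuid_eax(0);
 *	cpuid(0, &max_leaf, &vendor[0], &vendor[2], &vendor[1]);
 *
 * After the call, vendor[] holds the 12-byte vendor string in EBX/EDX/ECX
 * order (e.g. "GenuineIntel") and max_leaf is the highest basic leaf.
 */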

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
extern void cpu_init_secondary(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static __always_inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})
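
/*
 * Rough picture of what the macros above describe (illustrative only; the
 * exact padding is TOP_OF_KERNEL_STACK_PADDING and may be zero):
 *
 *	task_stack_page(task) + THREAD_SIZE
 *		[ TOP_OF_KERNEL_STACK_PADDING bytes ]
 *	task_top_of_stack(task)
 *		[ struct pt_regs                    ]	<- task_pt_regs(task)
 *		[ kernel stack, grows downwards     ]
 *	task_stack_page(task)
 */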

#ifdef CONFIG_X86_32
#define INIT_THREAD  {							\
	.sp0			= TOP_OF_INIT_STACK,			\
	.sysenter_cs		= __KERNEL_CS,				\
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
#define INIT_THREAD { }

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
			 unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

extern u16 get_llc_id(unsigned int cpu);

#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
#else
static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
static inline u32 amd_get_highest_perf(void)		{ return 0; }
#endif

#define for_each_possible_hypervisor_cpuid_base(function) \
	for (function = 0x40000000; function < 0x40010000; function += 0x100)

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for_each_possible_hypervisor_cpuid_base(base) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}

extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void __noreturn stop_this_cpu(void *dummy);
void microcode_check(void);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
	MDS_MITIGATION_VMWERV,
};

#ifdef CONFIG_X86_SGX
int arch_memory_failure(unsigned long pfn, int flags);
#define arch_memory_failure arch_memory_failure

bool arch_is_platform_page(u64 paddr);
#define arch_is_platform_page arch_is_platform_page
#endif

#endif /* _ASM_X86_PROCESSOR_H */