/* xref: /openbmc/linux/arch/x86/include/asm/processor.h (revision 185c8f33) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/cpuid.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware.  On the other hand,
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

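/*
 * Illustrative sketch (not part of the original header): a network
 * driver applies this constant when reserving headroom for the IP
 * header, e.g.:
 *
 *	skb_reserve(skb, NET_IP_ALIGN);
 *
 * With NET_IP_ALIGN == 0 this is a no-op on x86.
 */
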
#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];
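
/*
 * Illustrative sketch (not part of the original header): arch code
 * can report the detected TLB geometry from these arrays, e.g.:
 *
 *	pr_info("Last level dTLB, 4K entries: %d\n", tlb_lld_4k[ENTRIES]);
 */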

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head_32.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_stepping;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined: */
	int			x86_tlbsize;
#endif
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
	__u32			vmx_capability[NVMXINTS];
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	__u8			cu_id;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	/*
	 * Align to size of unsigned long because the x86_capability array
	 * is passed to bitops which require the alignment. Use unnamed
	 * union to enforce that the array is aligned to the size of
	 * unsigned long.
	 */
	union {
		__u32		x86_capability[NCAPINTS + NBUGINTS];
		unsigned long	x86_capability_alignment;
	};
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	unsigned int		x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values, valid only on the BSP: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_cache_mbm_width_offset;
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* protected processor identification number */
	u64			ppin;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	u16			cpu_die_id;
	u16			logical_die_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	/* Is SMT active on this core? */
	bool			smt_active;
	u32			microcode;
	/* Address space bits used by the cache internally */
	u8			x86_cache_bits;
	unsigned		initialized : 1;
} __randomize_layout;

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_HYGON	9
#define X86_VENDOR_ZHAOXIN	10
#define X86_VENDOR_VORTEX	11
#define X86_VENDOR_NUM		12

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
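
/*
 * Illustrative sketch (not part of the original header): typical
 * consumers read CPU data through boot_cpu_data or cpu_data(), e.g.:
 *
 *	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 *	    boot_cpu_data.x86 >= 0x17)
 *		...
 *
 *	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
 */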

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
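
/*
 * Worked example (illustrative): with x86_cache_bits == 36 and
 * PAGE_SHIFT == 12, this evaluates to BIT_ULL(23), i.e. the L1TF
 * checks treat PFNs at or above 2^23 as unsafe to expose in PTEs.
 */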

extern void early_cpu_init(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}
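
/*
 * Illustrative sketch (not part of the original header): read_cr3_pa()
 * masks off the PCID/flag bits, so the result can be compared against
 * a pgd's (possibly SME-encrypted) physical address, e.g.:
 *
 *	bool loaded = read_cr3_pa() == __sme_pa(mm->pgd);
 */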

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS.  When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it doesn't, we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
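
/*
 * Illustrative sketch of the ss1 caching described above (the real
 * logic lives in the context-switch code, not in this header):
 *
 *	if (this_cpu_read(cpu_tss_rw.x86_tss.ss1) != thread->sysenter_cs) {
 *		this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
 *		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 *	}
 */
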
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;

	/*
	 * Since Linux does not use ring 2, the 'sp2' slot is unused by
	 * hardware.  entry_SYSCALL_64 uses it as scratch space to stash
	 * the user RSP value.
	 */
	u64			sp2;

	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES / sizeof(long))

#define IO_BITMAP_OFFSET_VALID_MAP				\
	(offsetof(struct tss_struct, io_bitmap.bitmap) -	\
	 offsetof(struct tss_struct, x86_tss))

#define IO_BITMAP_OFFSET_VALID_ALL				\
	(offsetof(struct tss_struct, io_bitmap.mapall) -	\
	 offsetof(struct tss_struct, x86_tss))

#ifdef CONFIG_X86_IOPL_IOPERM
/*
 * sizeof(unsigned long) coming from an extra "long" at the end of the
 * iobitmap. The limit is inclusive, i.e. the last valid byte.
 */
# define __KERNEL_TSS_LIMIT	\
	(IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
	 sizeof(unsigned long) - 1)
#else
# define __KERNEL_TSS_LIMIT	\
	(offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
#endif

/* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
#define IO_BITMAP_OFFSET_INVALID	(__KERNEL_TSS_LIMIT + 1)
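
/*
 * Worked example (illustrative): IO_BITMAP_BITS covers all 65536 I/O
 * ports, so IO_BITMAP_BYTES is 8192 and, with 8-byte longs,
 * IO_BITMAP_LONGS is 1024. Pointing io_bitmap_base past
 * __KERNEL_TSS_LIMIT makes the CPU raise #GP for any user-space
 * IN/OUT instead of consulting the bitmap.
 */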

struct entry_stack {
	char	stack[PAGE_SIZE];
};

struct entry_stack_page {
	struct entry_stack stack;
} __aligned(PAGE_SIZE);

/*
 * All IO bitmap related data stored in the TSS:
 */
struct x86_io_bitmap {
	/* The sequence number of the last active bitmap. */
	u64			prev_sequence;

	/*
	 * Store the dirty size of the last io bitmap offender. The next
	 * one will have to do the cleanup as the switch out to a non io
	 * bitmap user will just set x86_tss.io_bitmap_base to a value
	 * outside of the TSS limit. So for sane tasks there is no need to
	 * actually touch the io_bitmap at all.
	 */
	unsigned int		prev_max;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * Special I/O bitmap to emulate IOPL(3). All bytes zero,
	 * except the additional byte at the end.
	 */
	unsigned long		mapall[IO_BITMAP_LONGS + 1];
};

struct tss_struct {
	/*
	 * The fixed hardware portion.  This must not cross a page boundary
	 * at risk of violating the SDM's advice and potentially triggering
	 * errata.
	 */
	struct x86_hw_tss	x86_tss;

	struct x86_io_bitmap	io_bitmap;
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);

/* Per CPU interrupt stacks */
struct irq_stack {
	char		stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);

#ifdef CONFIG_X86_64
struct fixed_percpu_data {
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 *
	 * Once we are willing to require -mstack-protector-guard-symbol=
	 * support for x86_64 stackprotector, we can get rid of this.
	 */
	char		gs_base[40];
	unsigned long	stack_canary;
};
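
/*
 * Illustrative check (a sketch, not from the original header): the
 * canary must land exactly at %gs:40 for compiler-generated accesses
 * to hit it, which could be asserted as:
 *
 *	BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
 */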

DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}

extern asmlinkage void ignore_sysret(void);

/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
#else	/* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
#endif
#endif	/* !X86_64 */

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
	unsigned long		sp0;
#endif
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short.  Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long fs;
	unsigned long gs;
#endif

	/* Intermediate state of ptrace hardware breakpoints: */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long           virtual_dr6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long           ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	struct io_bitmap	*io_bitmap;

	/*
	 * IOPL. Privilege level dependent I/O permission which is
	 * emulated via the I/O bitmap to prevent user space from disabling
	 * interrupts.
	 */
	unsigned long		iopl_emul;

	unsigned int		iopl_warn:1;
	unsigned int		sig_on_uaccess_err:1;

	/*
	 * Protection Keys Register for Userspace.  Loaded immediately on
	 * context switch. Store it in thread_struct to avoid a lookup in
	 * the task's FPU xstate buffer. This value is only valid when a
	 * task is scheduled out. For 'current' the authoritative source of
	 * PKRU is the hardware itself.
	 */
	u32			pkru;

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};

extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	fpu_thread_struct_whitelist(offset, size);
}

static inline void
native_load_sp0(unsigned long sp0)
{
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static __always_inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static __always_inline unsigned long current_top_of_stack(void)
{
	/*
	 * We can't read directly from tss.sp0: sp0 on x86_32 is special in
	 * and around vm86 mode, and sp0 on x86_64 is special because of the
	 * entry trampoline.
	 */
	return this_cpu_read_stable(pcpu_hot.top_of_stack);
}

static __always_inline bool on_thread_stack(void)
{
	return (unsigned long)(current_top_of_stack() -
			       current_stack_pointer) < THREAD_SIZE;
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}

#endif /* CONFIG_PARAVIRT_XXL */

unsigned long __get_wchan(struct task_struct *p);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void switch_gdt_and_percpu_base(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void cpu_init(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
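
/*
 * Illustrative sketch (assumes DEBUGCTLMSR_BTF from <asm/msr-index.h>):
 * enabling branch-trap single-stepping on the current CPU, in the
 * style of the block-step code:
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	debugctl |= DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 */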

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static __always_inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
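
/*
 * Illustrative use (a sketch, with a hypothetical 'shared' object):
 * warm the cache line in exclusive state before taking the lock:
 *
 *	spin_lock_prefetch(&shared->lock);
 *	...
 *	spin_lock(&shared->lock);
 */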

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})
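
/*
 * Illustrative use (a sketch): the user-mode register frame of a task
 * sits at the top of its kernel stack, so for example:
 *
 *	struct pt_regs *regs = task_pt_regs(task);
 *	unsigned long user_ip = regs->ip;
 *
 * which is exactly what KSTK_EIP() below expands to.
 */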

#ifdef CONFIG_X86_32
#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
}

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
extern unsigned long __end_init_task[];

#define INIT_THREAD {							    \
	.sp	= (unsigned long)&__end_init_task - sizeof(struct pt_regs), \
}

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE		__TASK_UNMAPPED_BASE(TASK_SIZE_LOW)
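
/*
 * Worked example (illustrative): with a 3 GB 32-bit task size this
 * puts the mmap search base at PAGE_ALIGN(3 GB / 3), i.e. 1 GB into
 * the address space.
 */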

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
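
/*
 * Illustrative sketch: user space reaches these helpers through
 * prctl(2) rather than calling them directly, e.g. (with 'mode' an
 * int):
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	(RDTSC now raises SIGSEGV)
 *	prctl(PR_GET_TSC, &mode);		(read the current mode back)
 */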

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

extern u16 get_llc_id(unsigned int cpu);

#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
#else
static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
static inline u32 amd_get_highest_perf(void)		{ return 0; }
#endif

extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef	CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void __noreturn stop_this_cpu(void *dummy);
void microcode_check(struct cpuinfo_x86 *prev_info);
void store_cpu_caps(struct cpuinfo_x86 *info);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
	MDS_MITIGATION_VMWERV,
};

#ifdef CONFIG_X86_SGX
int arch_memory_failure(unsigned long pfn, int flags);
#define arch_memory_failure arch_memory_failure

bool arch_is_platform_page(u64 paddr);
#define arch_is_platform_page arch_is_platform_page
#endif

#endif /* _ASM_X86_PROCESSOR_H */