// SPDX-License-Identifier: GPL-2.0

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>

/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code.  Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page.
	 */
	struct entry_stack_page entry_stack_page;

	/*
	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

	char entry_trampoline[PAGE_SIZE];

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries.
	 *
	 * In the future, this should have a separate slot for each stack
	 * with guard pages between them.
	 */
	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
#endif
#ifdef CONFIG_CPU_SUP_INTEL
	/*
	 * Per CPU debug store for Intel performance monitoring. Wastes a
	 * full page at the moment.
	 */
	struct debug_store cpu_debug_store;
	/*
	 * The actual PEBS/BTS buffers must be mapped to user space.
	 * Reserve enough fixmap PTEs.
	 */
	struct debug_store_buffers cpu_debug_buffers;
#endif
};

#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
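
/*
 * Usage sketch (illustrative only, not the real implementation): every
 * field of struct cpu_entry_area is populated by aliasing its backing
 * page into the area, roughly like this.  per_cpu_ptr_to_phys() is used
 * here only to obtain the physical address of a per-CPU backing page,
 * and some_percpu_page is a hypothetical backing store; the actual
 * mappings are created by setup_cpu_entry_areas().
 *
 *	void *backing = per_cpu_ptr(&some_percpu_page, cpu);
 *	void *alias   = &get_cpu_entry_area(cpu)->gdt;
 *
 *	cea_set_pte(alias, per_cpu_ptr_to_phys(backing), PAGE_KERNEL_RO);
 */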

#define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)

#define CPU_ENTRY_AREA_MAP_SIZE			\
	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
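
/*
 * Layout sketch implied by the constants above (illustrative; the real
 * get_cpu_entry_area() is defined out of line): the read-only IDT alias
 * occupies one page at CPU_ENTRY_AREA_BASE, and the per-CPU areas follow
 * it back to back, so CPU @cpu's area starts at:
 *
 *	(struct cpu_entry_area *)
 *		(CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE)
 */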

static inline struct entry_stack *cpu_entry_stack(int cpu)
{
	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}
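
/*
 * Example helper (a sketch, not part of the kernel API): the entry stack
 * is used top-down, so code pointing SP0 or a trampoline stack pointer at
 * it wants the address one past the end of the stack:
 */
static inline unsigned long cpu_entry_stack_top(int cpu)
{
	return (unsigned long)(cpu_entry_stack(cpu) + 1);
}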

#endif