/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>
#include <asm/pgtable_areas.h>

#ifdef CONFIG_X86_64

/* Macro to enforce the same ordering and stack sizes in both structs below */
#define ESTACKS_MEMBERS(guardsize)		\
	char	DF_stack_guard[guardsize];	\
	char	DF_stack[EXCEPTION_STKSZ];	\
	char	NMI_stack_guard[guardsize];	\
	char	NMI_stack[EXCEPTION_STKSZ];	\
	char	DB_stack_guard[guardsize];	\
	char	DB_stack[EXCEPTION_STKSZ];	\
	char	MCE_stack_guard[guardsize];	\
	char	MCE_stack[EXCEPTION_STKSZ];	\
	char	IST_top_guard[guardsize];

/* The exception stacks' physical storage. No guard pages required. */
struct exception_stacks {
	ESTACKS_MEMBERS(0)
};

/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
	ESTACKS_MEMBERS(PAGE_SIZE)
};
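
/*
 * Resulting layout of struct cea_exception_stacks (an illustrative
 * sketch; all members are char arrays, so there are no padding holes):
 *
 *	offset 0:				DF_stack_guard	(PAGE_SIZE)
 *	PAGE_SIZE:				DF_stack	(EXCEPTION_STKSZ)
 *	PAGE_SIZE + EXCEPTION_STKSZ:		NMI_stack_guard	(PAGE_SIZE)
 *	...
 *	4 * (PAGE_SIZE + EXCEPTION_STKSZ):	IST_top_guard	(PAGE_SIZE)
 *
 * Every stack is preceded by its own guard page, and IST_top_guard caps
 * the top of the highest stack.
 */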

/*
 * The exception stack ordering in [cea_]exception_stacks
 */
enum exception_stack_ordering {
	ESTACK_DF,
	ESTACK_NMI,
	ESTACK_DB,
	ESTACK_MCE,
	N_EXCEPTION_STACKS
};

#define CEA_ESTACK_SIZE(st)					\
	sizeof(((struct cea_exception_stacks *)0)->st## _stack)

#define CEA_ESTACK_BOT(ceastp, st)				\
	((unsigned long)&(ceastp)->st## _stack)

#define CEA_ESTACK_TOP(ceastp, st)				\
	(CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))

#define CEA_ESTACK_OFFS(st)					\
	offsetof(struct cea_exception_stacks, st## _stack)

#define CEA_ESTACK_PAGES					\
	(sizeof(struct cea_exception_stacks) / PAGE_SIZE)
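
/*
 * Usage sketch (illustrative only): for the #DF stack the helpers above
 * expand to
 *
 *	CEA_ESTACK_SIZE(DF)	- sizeof the DF_stack member, i.e. EXCEPTION_STKSZ
 *	CEA_ESTACK_OFFS(DF)	- offsetof(struct cea_exception_stacks, DF_stack)
 *	CEA_ESTACK_BOT(cea, DF)	- lowest address of cea->DF_stack
 *	CEA_ESTACK_TOP(cea, DF)	- address one past the stack, i.e. the
 *				  initial stack pointer for an IST entry
 */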

#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_32
struct doublefault_stack {
	unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
	struct x86_hw_tss tss;
} __aligned(PAGE_SIZE);
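
/*
 * Layout note (a sketch of the intent, not a new guarantee): the hardware
 * TSS sits at the top of the page and the stack fills the rest, so the
 * #DF stack grows down from just below the TSS and, on overflow, runs
 * into guard_doublefault_stack in struct cpu_entry_area below.
 */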
#endif

/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code.  Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page. On 32-bit the GDT must be writeable, so
	 * it needs an extra guard page.
	 */
#ifdef CONFIG_X86_32
	char guard_entry_stack[PAGE_SIZE];
#endif
	struct entry_stack_page entry_stack_page;

#ifdef CONFIG_X86_32
	char guard_doublefault_stack[PAGE_SIZE];
	struct doublefault_stack doublefault_stack;
#endif

	/*
	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries with guard pages.
	 */
	struct cea_exception_stacks estacks;
#endif
	/*
	 * Per CPU debug store for Intel performance monitoring. Wastes a
	 * full page at the moment.
	 */
	struct debug_store cpu_debug_store;
	/*
	 * The actual PEBS/BTS buffers must be mapped to user space.
	 * Reserve enough fixmap PTEs.
	 */
	struct debug_store_buffers cpu_debug_buffers;
};

#define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)

/* Total size includes the readonly IDT mapping page as well: */
#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
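
/*
 * Worked example with hypothetical numbers (none of them are implied by
 * this header): with a 4K PAGE_SIZE, NR_CPUS == 8 and a per CPU area of
 * 0x2c000 bytes:
 *
 *	CPU_ENTRY_AREA_ARRAY_SIZE == 8 * 0x2c000	== 0x160000
 *	CPU_ENTRY_AREA_TOTAL_SIZE == 0x160000 + 0x1000	== 0x161000
 */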

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
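
/*
 * Minimal usage sketch for cea_set_pte() (illustrative; the real mapping
 * code lives in arch/x86/mm/cpu_entry_area.c, and gdt_paddr is a
 * hypothetical placeholder for the GDT's physical address): each field of
 * the area is a virtual alias established by pointing a cpu entry area
 * PTE at the backing store, e.g.
 *
 *	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, gdt_paddr, PAGE_KERNEL_RO);
 */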

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);

static inline struct entry_stack *cpu_entry_stack(int cpu)
{
	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}

#define __this_cpu_ist_top_va(name)					\
	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
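
/*
 * Usage sketch (illustrative): entry and stack walking code uses the
 * accessors rather than the raw structures, e.g.
 *
 *	struct entry_stack *es = cpu_entry_stack(smp_processor_id());
 *	unsigned long df_top = __this_cpu_ist_top_va(DF);
 *
 * where DF selects the DF_stack member via the st## _stack token pasting
 * above.
 */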

#endif /* _ASM_X86_CPU_ENTRY_AREA_H */