/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>
#include <asm/pgtable_areas.h>

#ifdef CONFIG_X86_64

#ifdef CONFIG_AMD_MEM_ENCRYPT
#define VC_EXCEPTION_STKSZ	EXCEPTION_STKSZ
#else
#define VC_EXCEPTION_STKSZ	0
#endif

/* Macro to enforce the same ordering and stack sizes */
#define ESTACKS_MEMBERS(guardsize, optional_stack_size)		\
	char	DF_stack_guard[guardsize];			\
	char	DF_stack[EXCEPTION_STKSZ];			\
	char	NMI_stack_guard[guardsize];			\
	char	NMI_stack[EXCEPTION_STKSZ];			\
	char	DB_stack_guard[guardsize];			\
	char	DB_stack[EXCEPTION_STKSZ];			\
	char	MCE_stack_guard[guardsize];			\
	char	MCE_stack[EXCEPTION_STKSZ];			\
	char	VC_stack_guard[guardsize];			\
	char	VC_stack[optional_stack_size];			\
	char	VC2_stack_guard[guardsize];			\
	char	VC2_stack[optional_stack_size];			\
	char	IST_top_guard[guardsize];			\

/* The exception stacks' physical storage. No guard pages required. */
struct exception_stacks {
	ESTACKS_MEMBERS(0, VC_EXCEPTION_STKSZ)
};

/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
	ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};
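
/*
 * Illustrative expansion (a sketch, not actual declarations): in
 * struct cea_exception_stacks the members begin
 *
 *	char DF_stack_guard[PAGE_SIZE];
 *	char DF_stack[EXCEPTION_STKSZ];
 *	char NMI_stack_guard[PAGE_SIZE];
 *	...
 *	char IST_top_guard[PAGE_SIZE];
 *
 * while struct exception_stacks passes guardsize == 0, so its guard
 * members become zero-sized arrays and only the stack storage itself
 * occupies memory.
 */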

/*
 * The exception stack ordering in [cea_]exception_stacks
 */
enum exception_stack_ordering {
	ESTACK_DF,
	ESTACK_NMI,
	ESTACK_DB,
	ESTACK_MCE,
	ESTACK_VC,
	ESTACK_VC2,
	N_EXCEPTION_STACKS
};
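
/*
 * Note: the enumerators mirror the *_stack members emitted by
 * ESTACKS_MEMBERS() above and must be kept in the same order, since
 * they document the layout of [cea_]exception_stacks.
 */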

#define CEA_ESTACK_SIZE(st)					\
	sizeof(((struct cea_exception_stacks *)0)->st## _stack)

#define CEA_ESTACK_BOT(ceastp, st)				\
	((unsigned long)&(ceastp)->st## _stack)

#define CEA_ESTACK_TOP(ceastp, st)				\
	(CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))

#define CEA_ESTACK_OFFS(st)					\
	offsetof(struct cea_exception_stacks, st## _stack)

#define CEA_ESTACK_PAGES					\
	(sizeof(struct cea_exception_stacks) / PAGE_SIZE)
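
/*
 * Example usage (illustrative only): given a pointer 'ceastp' to a
 * CPU's cea_exception_stacks, the #DF stack bounds are
 *
 *	unsigned long bot = CEA_ESTACK_BOT(ceastp, DF);
 *	unsigned long top = CEA_ESTACK_TOP(ceastp, DF);
 *
 * with top - bot == CEA_ESTACK_SIZE(DF) == EXCEPTION_STKSZ.
 */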

#endif

#ifdef CONFIG_X86_32
struct doublefault_stack {
	unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
	struct x86_hw_tss tss;
} __aligned(PAGE_SIZE);
#endif
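
/*
 * Note: by construction the stack array fills the page up to the
 * hardware TSS, so sizeof(struct doublefault_stack) == PAGE_SIZE
 * (assuming sizeof(struct x86_hw_tss) is a multiple of
 * sizeof(unsigned long), which it is on 32-bit).
 */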

/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code. Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page. On 32-bit the GDT must be writable, so
	 * it needs an extra guard page.
	 */
#ifdef CONFIG_X86_32
	char guard_entry_stack[PAGE_SIZE];
#endif
	struct entry_stack_page entry_stack_page;

#ifdef CONFIG_X86_32
	char guard_doublefault_stack[PAGE_SIZE];
	struct doublefault_stack doublefault_stack;
#endif

	/*
	 * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries with guard pages.
	 */
	struct cea_exception_stacks estacks;
#endif
	/*
	 * Per CPU debug store for Intel performance monitoring. Wastes a
	 * full page at the moment.
	 */
	struct debug_store cpu_debug_store;
	/*
	 * The actual PEBS/BTS buffers must be mapped to user space.
	 * Reserve enough fixmap PTEs.
	 */
	struct debug_store_buffers cpu_debug_buffers;
};

#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);

static __always_inline struct entry_stack *cpu_entry_stack(int cpu)
{
	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}
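
/*
 * Example (illustrative only): entry setup code derives the top of a
 * CPU's entry stack by pointing one past the stack structure:
 *
 *	unsigned long sp0 = (unsigned long)(cpu_entry_stack(cpu) + 1);
 */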

#define __this_cpu_ist_top_va(name)				\
	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)

#define __this_cpu_ist_bottom_va(name)				\
	CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
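
/*
 * Example (illustrative only): locating the bounds of the current
 * CPU's NMI IST stack, e.g. for a stack-range check:
 *
 *	unsigned long top = __this_cpu_ist_top_va(NMI);
 *	unsigned long bot = __this_cpu_ist_bottom_va(NMI);
 */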

#endif