// SPDX-License-Identifier: GPL-2.0

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>

/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code.  Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page.
	 */
	struct entry_stack_page entry_stack_page;

	/*
	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries: (N_EXCEPTION_STACKS - 1)
	 * stacks of EXCEPTION_STKSZ each, packed back to back, plus the
	 * debug stack, which has its own size, DEBUG_STKSZ.
	 *
	 * In the future, this should have a separate slot for each stack
	 * with guard pages between them.
	 */
	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
#endif
#ifdef CONFIG_CPU_SUP_INTEL
	/*
	 * Per CPU debug store for Intel performance monitoring. Wastes a
	 * full page at the moment.
	 */
	struct debug_store cpu_debug_store;
	/*
	 * The actual PEBS/BTS buffers must be mapped to user space.
	 * Reserve enough fixmap PTEs.
	 */
	struct debug_store_buffers cpu_debug_buffers;
#endif
};

#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
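
/*
 * Illustrative sketch, not part of this header: the setup code in
 * arch/x86/mm/cpu_entry_area.c uses cea_set_pte() to point cpu_entry_area
 * PTEs at the real percpu backing store one page at a time, roughly like:
 *
 *	static void __init
 *	cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
 *	{
 *		for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
 *			cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
 *	}
 *
 * This is how each field above becomes a virtual alias of its backing pages.
 */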

#define	CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)

#define CPU_ENTRY_AREA_MAP_SIZE			\
	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
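
/*
 * Illustrative sketch, defined for real in arch/x86/mm/cpu_entry_area.c:
 * the lookup is pure address arithmetic within the fixed layout above,
 * roughly:
 *
 *	struct cpu_entry_area *get_cpu_entry_area(int cpu)
 *	{
 *		unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
 *		BUILD_BUG_ON(CPU_ENTRY_AREA_SIZE % PAGE_SIZE != 0);
 *		return (struct cpu_entry_area *) va;
 *	}
 */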

static inline struct entry_stack *cpu_entry_stack(int cpu)
{
	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}
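
/*
 * Illustrative usage, based on the cpu_init() code in
 * arch/x86/kernel/cpu/common.c: the entry stack grows down, so callers take
 * the address one past the struct to obtain the top of the stack, e.g.:
 *
 *	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
 */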

#endif