// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>
#include <linux/random.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
#include <asm/kasan.h>

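/* Per-CPU backing page for the entry stack; mapped into each CPU's entry area below. */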
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);

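/*
 * Index of each CPU's slot within the CPU_ENTRY_AREA_PER_CPU region.  On
 * 64-bit the slot is chosen at random by init_cea_offsets() instead of
 * being the CPU number, so the per-CPU entry area addresses are not
 * trivially predictable.
 */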
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return per_cpu(_cea_offset, cpu);
}

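/*
 * Assign each possible CPU a unique, randomly chosen slot.  A colliding
 * slot is simply re-drawn, which makes the loop quadratic in the number of
 * possible CPUs, but it runs only once at boot.
 */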
static __init void init_cea_offsets(void)
{
	unsigned int max_cea;
	unsigned int i, j;

	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;

	/* O(sodding terrible) */
	for_each_possible_cpu(i) {
		unsigned int cea;

again:
		cea = get_random_u32_below(max_cea);

		for_each_possible_cpu(j) {
			if (cea_offset(j) == cea)
				goto again;

			if (i == j)
				break;
		}

		per_cpu(_cea_offset, i) = cea;
	}
}
#else /* !X86_64 */
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);

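/* No randomization on 32-bit: the slot is simply the CPU number. */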
static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return cpu;
}
static inline void init_cea_offsets(void) { }
#endif

/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

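/*
 * Install a single PTE so that @cea_vaddr inside the cpu_entry_area maps
 * the physical address @pa with protections @flags.
 */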
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

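/*
 * Map @pages pages of per-CPU data starting at @ptr into the cpu_entry_area
 * at @cea_vaddr, one PTE at a time.  The physical address is looked up for
 * every page because per-CPU memory is not guaranteed to be physically
 * contiguous.
 */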
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

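/*
 * Map the Intel debug store (PEBS/BTS) for this CPU.  The debug_store
 * struct itself is backed by per-CPU pages; the much larger buffer area is
 * only covered with non-present PTEs here, so the page tables exist when
 * the buffers are mapped in later by the perf code.
 */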
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not-yet-allocated per-CPU
	 * memory such as the debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

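/*
 * Map one exception stack from the per-CPU backing store into this CPU's
 * entry area.  Expects @cea, @estacks and @npages to exist in the caller.
 */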
#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exception stack mappings in the per-CPU area are protected
	 * by guard pages, so each stack must be mapped separately. DB2 is
	 * not mapped; it just exists to catch triple nesting of #DB.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB);
	cea_map_stack(MCE);

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
			cea_map_stack(VC);
			cea_map_stack(VC2);
		}
	}
}
#else
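/* On 32-bit only the doublefault stack, used by the #DF task gate, needs mapping. */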
static inline void percpu_setup_exception_stacks(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

	cea_map_percpu_pages(&cea->doublefault_stack,
			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

/* Set up the fixmap mappings only once per processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 */
	pgprot_t gdt_prot = PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

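	/*
	 * Make sure the KASAN shadow covering this CPU's entry area is
	 * populated before any of it is mapped below.
	 */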
	kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
					early_cpu_to_node(cpu));

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

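	/*
	 * Map the entry stack: the small per-CPU stack used on kernel entry
	 * before switching to the task stack.
	 */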
	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	/*
	 * VMX changes the host TR limit to 0x67 after a VM exit. This is
	 * okay, since 0x67 covers the size of struct x86_hw_tss. Make sure
	 * that this is correct.
	 */
	BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
	BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);

	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}

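/*
 * 32-bit only: pre-allocate the PTE pages (one per PMD) covering the whole
 * cpu_entry_area range, so the per-CPU mappings installed later do not need
 * to allocate page tables.
 */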
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	/* The +1 is for the readonly IDT: */
	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

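/* Set up the entry areas for all possible CPUs at boot. */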
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	init_cea_offsets();

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}