// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
#include <asm/kasan.h>
#include <asm/setup.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);

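/*
 * Slot (in units of CPU_ENTRY_AREA_SIZE) at which each CPU's entry area is
 * mapped.  With KASLR the slots are randomized by init_cea_offsets(),
 * otherwise slot == cpu.
 */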
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return per_cpu(_cea_offset, cpu);
}

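/*
 * Assign every possible CPU a slot in the cpu_entry_area region.  Without
 * KASLR this is simply the identity mapping.  With KASLR each CPU gets a
 * random slot which is re-drawn on collision with any already assigned CPU,
 * so all slots end up unique.
 */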
static __init void init_cea_offsets(void)
{
	unsigned int max_cea;
	unsigned int i, j;

	if (!kaslr_enabled()) {
		for_each_possible_cpu(i)
			per_cpu(_cea_offset, i) = i;
		return;
	}

	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;

	/* O(sodding terrible) */
	for_each_possible_cpu(i) {
		unsigned int cea;

again:
		cea = get_random_u32_below(max_cea);

		for_each_possible_cpu(j) {
			if (cea_offset(j) == cea)
				goto again;

			if (i == j)
				break;
		}

		per_cpu(_cea_offset, i) = cea;
	}
}
#else /* !X86_64 */
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return cpu;
}
static inline void init_cea_offsets(void) { }
#endif

/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

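/* Install a single PTE at @cea_vaddr in the cpu_entry_area mapping. */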
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

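/* Map @pages pages of the per-CPU allocation @ptr at @cea_vaddr with @prot. */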
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

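/*
 * Map this CPU's debug store into the cpu_entry_area and pre-populate the
 * PMDs covering the debug store buffers, which are allocated and mapped
 * only later at runtime.
 */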
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

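/*
 * Map one named exception stack from its per-CPU backing store into this
 * CPU's entry area; expects 'estacks', 'cea' and 'npages' in the caller.
 */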
#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exception stack mappings in the per cpu area are protected
	 * by guard pages so each stack must be mapped separately. DB2 is
	 * not mapped; it just exists to catch triple nesting of #DB.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB);
	cea_map_stack(MCE);

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
			cea_map_stack(VC);
			cea_map_stack(VC2);
		}
	}
}
#else
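/* On 32-bit only the #DF (doublefault) stack is mapped into the entry area. */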
static inline void percpu_setup_exception_stacks(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

	cea_map_percpu_pages(&cea->doublefault_stack,
			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

/* Setup the fixmap mappings only once per-processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 */
	pgprot_t gdt_prot = PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
					early_cpu_to_node(cpu));

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	/*
	 * VMX changes the host TR limit to 0x67 after a VM exit. This is
	 * okay, since 0x67 covers the size of struct x86_hw_tss. Make sure
	 * that this is correct.
	 */
	BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
	BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);

	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}

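/*
 * On 32-bit, pre-populate the page table pages covering the cpu_entry_area
 * range in the initial page tables; on 64-bit there is nothing to do here.
 */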
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	/* The +1 is for the readonly IDT: */
	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

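/*
 * Main entry point: pick the per-CPU slots, prepare the page tables and map
 * the entry area of every possible CPU.
 */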
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	init_cea_offsets();

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pgdir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}