// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
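/*
 * One contiguous per-CPU backing store for the IST exception stacks:
 * N_EXCEPTION_STACKS - 1 stacks of EXCEPTION_STKSZ each, plus a larger
 * DEBUG_STKSZ slot for the debug stack.
 */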
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif

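/*
 * Translate a CPU number into the virtual address of its entry area.
 * The areas are laid out at a fixed stride, so this is pure address
 * arithmetic: CPU n lives at CPU_ENTRY_AREA_PER_CPU +
 * n * CPU_ENTRY_AREA_SIZE, with no page table walk required.
 */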
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

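/*
 * Install a single PTE in the CPU entry area: convert the physical
 * address to a pfn, combine it with the protection bits and write it
 * at the given entry area virtual address.
 */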
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;

	set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
}

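/*
 * Map a range of per-CPU pages into the entry area one page at a time.
 * per_cpu_ptr_to_phys() is resolved per page because per-CPU memory is
 * not guaranteed to be physically contiguous.
 */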
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

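/*
 * Map the per-CPU debug store (the Intel DS area used by PEBS/BTS)
 * into the entry area so the hardware can reach it, and pre-populate
 * page tables for the buffers that will back it later.
 */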
static void percpu_setup_debug_store(int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per-CPU
	 * memory like debug store buffers: installing PAGE_NONE PTEs
	 * fills in the intermediate page table levels without granting
	 * any access.  The real mappings are established later, once
	 * the buffers are allocated.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

/* Set up the CPU entry area mappings, once per CPU */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	extern char _entry_trampoline[];

	/* On 64-bit systems, the GDT and TSS are mapped read-only. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
		    gdt_prot);

	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
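	/*
	 * The check below XORs the start and end offsets of the hardware
	 * TSS; any bit that survives the PAGE_MASK is a page-number bit,
	 * so a nonzero result means the two ends fall on different pages.
	 */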
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
			     &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

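	/* On 32-bit, keep a per-CPU copy of the pointer for fast lookup. */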
#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
			     &per_cpu(exception_stacks, cpu),
			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);

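	/*
	 * All CPUs share one trampoline page: __pa_symbol() resolves the
	 * kernel-image symbol, and the mapping is executable but
	 * read-only.
	 */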
	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
#endif
	percpu_setup_debug_store(cpu);
}

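/*
 * On 32-bit, pre-populate the page tables covering the whole entry
 * area so that later cea_set_pte() calls only have to fill in PTEs.
 */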
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/*
	 * Careful here: start + PMD_SIZE might wrap around to zero, hence
	 * the extra check against CPU_ENTRY_AREA_BASE in the loop
	 * condition.
	 */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

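/*
 * Boot-time entry point: populate the shared page tables, then build
 * the entry area mappings for every possible CPU.
 */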
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which
	 * needs to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}
173