// SPDX-License-Identifier: GPL-2.0
/* arch/x86/mm/cpu_entry_area.c */

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

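/* Per-CPU backing store for the entry stack, mapped into each CPU's entry area. */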
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

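/*
 * 64-bit only: backing store for the IST exception stacks (the debug stack
 * is DEBUG_STKSZ, the others EXCEPTION_STKSZ) and a per-CPU kcore entry for
 * the entry trampoline alias.
 */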
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
static DEFINE_PER_CPU(struct kcore_list, kcore_entry_trampoline);
#endif

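/*
 * Return the fixed virtual address of @cpu's entry area: the areas are laid
 * out contiguously, CPU_ENTRY_AREA_SIZE apart, starting at
 * CPU_ENTRY_AREA_PER_CPU.
 */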
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

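/* Install a single PTE mapping the physical address @pa at @cea_vaddr. */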
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

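/*
 * Map @pages pages of per-CPU memory starting at @ptr into the entry area
 * at @cea_vaddr, one PTE at a time.
 */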
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

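/*
 * On Intel CPUs, map the per-CPU debug store into the entry area and
 * pre-populate, as not-present, the range reserved for its buffers.
 */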
static void percpu_setup_debug_store(int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not-yet-allocated per-CPU
	 * memory like the debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

/* Set up the fixmap mappings only once per CPU */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	extern char _entry_trampoline[];

	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
		    gdt_prot);

	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
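	/*
	 * (a ^ b) & PAGE_MASK is zero only when a and b are in the same
	 * page, i.e. when the hardware portion of the TSS (x86_tss) does
	 * not cross a page boundary.
	 */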
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
			     &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

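	/* On 32-bit, cache the entry area pointer in a per-CPU variable. */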
#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

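	/*
	 * 64-bit only: map the IST exception stacks, after checking that
	 * the per-CPU backing storage and the space reserved in struct
	 * cpu_entry_area agree in size.
	 */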
#ifdef CONFIG_X86_64
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
			     &per_cpu(exception_stacks, cpu),
			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);

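	/*
	 * Map the entry trampoline text read-only and executable at its
	 * fixed per-CPU alias address.
	 */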
	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
	/*
	 * The cpu_entry_area alias addresses are not in the kernel binary
	 * so they do not show up in /proc/kcore normally.  This adds entries
	 * for them manually.
	 */
	kclist_add_remap(&per_cpu(kcore_entry_trampoline, cpu),
			 _entry_trampoline,
			 &get_cpu_entry_area(cpu)->entry_trampoline, PAGE_SIZE);
#endif
	percpu_setup_debug_store(cpu);
}

#ifdef CONFIG_X86_64
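/*
 * Provide synthetic kallsyms entries for the per-CPU entry trampoline
 * aliases, which have no symbols of their own in the kernel image.
 */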
int arch_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		     char *name)
{
	unsigned int cpu, ncpu = 0;

	if (symnum >= num_possible_cpus())
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		if (ncpu++ >= symnum)
			break;
	}

	*value = (unsigned long)&get_cpu_entry_area(cpu)->entry_trampoline;
	*type = 't';
	strlcpy(name, "__entry_SYSCALL_64_trampoline", KSYM_NAME_LEN);

	return 0;
}
#endif

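/*
 * On 32-bit, make sure a PTE page exists for every PMD covering the
 * cpu_entry_area address range.
 */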
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

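/* Called once at boot to construct the entry area for each possible CPU. */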
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}