// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

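/* Per-CPU backing store for the entry stack, mapped into the CPU's entry area. */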
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
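/* Per-CPU backing store for the IST exception stacks mapped into the entry area. */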
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif

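/*
 * Return the virtual address of the cpu_entry_area for @cpu.  The areas are
 * laid out contiguously above CPU_ENTRY_AREA_PER_CPU, one CPU_ENTRY_AREA_SIZE
 * slot per CPU.
 */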
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

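/*
 * Install a single PTE mapping the physical address @pa at virtual address
 * @cea_vaddr inside the cpu_entry_area.
 */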
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

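/*
 * Map @pages pages of the per-CPU object @ptr into the cpu_entry_area
 * starting at @cea_vaddr, one PTE at a time.
 */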
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

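/*
 * On Intel CPUs, map the per-CPU debug store into the cpu_entry_area and
 * pre-populate the page tables covering the (not yet allocated) debug
 * store buffers.
 */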
static void __init percpu_setup_debug_store(int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

/* Set up the cpu_entry_area mappings once for each CPU. */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	/* On 64-bit systems, the GDT and TSS are mapped read-only. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
		    gdt_prot);

	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
			     &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
			     &per_cpu(exception_stacks, cpu),
			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
#endif
	percpu_setup_debug_store(cpu);
}

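/*
 * On 32-bit kernels, pre-allocate the page tables covering the whole
 * cpu_entry_area.  This is a no-op on 64-bit.
 */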
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

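/* Set up the cpu_entry_area mappings for every possible CPU at boot. */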
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}