xref: /openbmc/linux/arch/x86/kernel/setup_percpu.c (revision 82ced6fd)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
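
/*
 * per_cpu(var, cpu) shifts var's link-time address by __per_cpu_offset[cpu];
 * accesses on the local cpu instead go through the percpu segment base
 * (%gs on 64-bit, %fs on 32-bit), which is pointed at the running cpu's
 * area by setup_percpu_segment()/switch_to_new_gdt() below.
 */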

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
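	/* prefer memory above MAX_DMA_ADDRESS so the DMA zone isn't consumed */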
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Remap allocator
 *
 * This allocator uses a PMD page as its unit.  A PMD page is allocated
 * for each cpu and remapped into the vmalloc area using a PMD mapping.
 * As a PMD page is quite large, only part of it is used for the first
 * chunk; the unused part is returned to the bootmem allocator.
 *
 * The PMD pages are therefore mapped twice - once in the linear
 * physical mapping and once in the vmalloc area for the first percpu
 * chunk.  The double mapping adds some PMD TLB pressure, but it is
 * still much better than using only 4k mappings while remaining
 * NUMA friendly.
 */
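/*
 * In the resulting layout, cpu N's slice of the first chunk lives at
 * vm.addr + N * PMD_SIZE in the vmalloc area and is backed by the
 * PMD-sized page allocated near N's node by pcpu_alloc_bootmem().
 */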
#ifdef CONFIG_NEED_MULTIPLE_NODES
static size_t pcpur_size __initdata;
static void **pcpur_ptrs __initdata;

static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpur_size)
		return NULL;

	return virt_to_page(pcpur_ptrs[cpu] + off);
}

static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	static struct vm_struct vm;
	size_t ptrs_size, dyn_size;
	unsigned int cpu;
	ssize_t ret;

	/*
	 * If large pages aren't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 *
	 * NOTE: disabled for now.
	 */
	if (true || !cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently only a single page is supported.  Supporting multiple
	 * pages won't be too difficult if it ever becomes necessary.
	 */
	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpur_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
	pcpur_ptrs = alloc_bootmem(ptrs_size);

	for_each_possible_cpu(cpu) {
		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
		if (!pcpur_ptrs[cpu])
			goto enomem;

		/*
		 * Only use pcpur_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
			     PMD_SIZE - pcpur_size);

		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
	}

	/* allocate address and map */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&vm, PMD_SIZE);

	for_each_possible_cpu(cpu) {
		pmd_t *pmd;

		pmd = populate_extra_pmd((unsigned long)vm.addr
					 + cpu * PMD_SIZE);
		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
				     PAGE_KERNEL_LARGE));
	}

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, vm.addr, NULL);
	goto out_free_ar;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpur_ptrs[cpu])
			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
	return ret;
}
#else
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to contain just the static area plus the
 * module and dynamic reserves, and is embedded into the linear
 * physical mapping so that it can use PMD mappings without additional
 * TLB pressure.
 */
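/*
 * Note that the dynamic size passed to pcpu_embed_first_chunk() below is
 * reserve - PERCPU_FIRST_CHUNK_RESERVE: on x86_64 the module reserve is
 * handed over as the first chunk's reserved region, while on x86_32 it
 * is simply folded into the dynamic area.
 */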
static ssize_t __init setup_pcpu_embed(size_t static_size)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large pages aren't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!cpu_has_pse || pcpu_need_numa())
		return -EINVAL;

	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  The static percpu area is allocated
 * page by page, and most of the initialization is done by the generic
 * setup function.
 */
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

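/* pcpu4k_pages[] is cpu-major: index = cpu * nr_static_pages + pageno */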
static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_nr_static_pages)
		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
	return NULL;
}

static void __init pcpu4k_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}

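/*
 * On 32-bit, percpu variables are reached through %fs, so each cpu gets
 * a GDT entry whose base is that cpu's percpu offset.  On 64-bit the
 * %gs base MSR is used instead and no descriptor needs to be written
 * here.
 */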
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = setup_pcpu_remap(static_size);
	if (ret < 0)
		ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

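	/*
	 * On success each setup_pcpu_*() helper returns the unit size -
	 * the stride between consecutive cpus' areas in the first chunk.
	 */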
	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed, indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using the
		 * .data.init area.  Reload any changed state for the
		 * boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}