xref: /openbmc/linux/arch/x86/kernel/setup_percpu.c (revision b04b4f78)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

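/*
 * Until setup_per_cpu_areas() switches to the real percpu areas, the
 * boot offsets below must make percpu accesses land in the initial
 * static percpu section.  On x86_64 the percpu symbols are zero-based,
 * so the offset is the load address of that section; on x86_32 the
 * symbols already carry their linked addresses and 0 is sufficient.
 */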
DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64, symbols referenced from code should be reachable using
 * 32-bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk, which
 * is located at the percpu segment base.  On x86_32, any code can
 * address any location, so there is no need to reserve space in the
 * first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
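	/* steer bootmem allocations above the DMA zone whenever possible */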
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Remap allocator
 *
 * This allocator uses a PMD page as the allocation unit.  A PMD page is
 * allocated for each cpu and each one is remapped into the vmalloc area
 * using a PMD mapping.  As a PMD page is quite large, only part of it is
 * used for the first chunk; the unused part is returned to the bootmem
 * allocator.
 *
 * The PMD pages are therefore mapped twice - once in the linear physical
 * mapping and once in the vmalloc area for the first percpu chunk.  The
 * double mapping adds pressure for one extra PMD TLB entry, which is
 * still much better than using only 4k mappings while remaining NUMA
 * friendly.
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
static size_t pcpur_size __initdata;
static void **pcpur_ptrs __initdata;

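/*
 * Page lookup callback handed to pcpu_setup_first_chunk(): return the
 * page backing the pageno'th page of cpu's PMD-sized area, or NULL for
 * offsets beyond pcpur_size (the part already handed back to bootmem).
 */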
static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpur_size)
		return NULL;

	return virt_to_page(pcpur_ptrs[cpu] + off);
}

static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	static struct vm_struct vm;
	size_t ptrs_size, dyn_size;
	unsigned int cpu;
	ssize_t ret;

	/*
	 * If large pages aren't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 */
	if (!cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently only a single PMD page per cpu is supported.  Supporting
	 * multiple pages won't be too difficult if it ever becomes necessary.
	 */
	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpur_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
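	/*
	 * Whatever remains of the page-aligned size after the static area
	 * and the module reserve becomes dynamic percpu space.
	 */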
	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
	pcpur_ptrs = alloc_bootmem(ptrs_size);

	for_each_possible_cpu(cpu) {
		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
		if (!pcpur_ptrs[cpu])
			goto enomem;

		/*
		 * Only use pcpur_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
			     PMD_SIZE - pcpur_size);

		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
	}

	/* allocate address and map */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&vm, PMD_SIZE);

	for_each_possible_cpu(cpu) {
		pmd_t *pmd;

		pmd = populate_extra_pmd((unsigned long)vm.addr
					 + cpu * PMD_SIZE);
		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
				     PAGE_KERNEL_LARGE));
	}

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, vm.addr, NULL);
	goto out_free_ar;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpur_ptrs[cpu])
			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
	return ret;
}
#else
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus the
 * module and dynamic reserves, and is embedded into the linear physical
 * mapping so that it can use PMD mappings without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large pages aren't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!cpu_has_pse || pcpu_need_numa())
		return -EINVAL;

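	/*
	 * The rest of the reserve becomes dynamic space; a unit size of -1
	 * asks the generic percpu code to pick a suitable one.
	 */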
	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  The static percpu area is allocated
 * page by page and most of the initialization is done by the generic
 * setup function.
 */
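/* flat [cpu][page] array of the pages backing each cpu's static area */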
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_nr_static_pages)
		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
	return NULL;
}

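/*
 * PTE pre-population callback: the generic first-chunk setup code calls
 * this for every address it is about to map so that the page tables for
 * the percpu region exist before the mapping is installed.
 */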
static void __init pcpu4k_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}

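/*
 * On 32-bit the percpu area is reached through a segment register, so
 * each cpu needs a GDT descriptor whose base is its percpu offset.
 * 64-bit loads the GS base MSR directly and needs no descriptor here.
 */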
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = setup_pcpu_remap(static_size);
	if (ret < 0)
		ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

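	/* on success each setup_pcpu_*() returns the per-cpu unit size */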
	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
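	/*
	 * Percpu symbols still carry addresses relative to __per_cpu_start,
	 * so delta plus each cpu's slice of the new area rebases them onto
	 * the chunk the chosen allocator just set up.
	 */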
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptrs
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}