xref: /openbmc/linux/mm/percpu.c (revision 495311927ffbe3604e915aeafdf03325e9925b9d)
1 /*
2  * mm/percpu.c - percpu memory allocator
3  *
4  * Copyright (C) 2009		SUSE Linux Products GmbH
5  * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
6  *
7  * This file is released under the GPLv2.
8  *
9  * This is percpu allocator which can handle both static and dynamic
10  * areas.  Percpu areas are allocated in chunks.  Each chunk
11  * consists of a boot-time determined number of units and the first
12  * chunk is used for static percpu variables in the kernel image
13  * (special boot time alloc/init handling necessary as these areas
14  * need to be brought up before allocation services are running).
15  * Unit grows as necessary and all units grow or shrink in unison.
16  * When a chunk is filled up, another chunk is allocated.
17  *
18  *  c0                           c1                         c2
19  *  -------------------          -------------------        ------------
20  * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
21  *  -------------------  ......  -------------------  ....  ------------
22  *
23  * Allocation is done in offset-size areas of a single unit space.  I.e.,
24  * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
25  * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
26  * cpus.  On NUMA, the mapping can be non-linear and even sparse.
27  * Percpu access can be done by configuring percpu base registers
28  * according to cpu to unit mapping and pcpu_unit_size.
29  *
30  * There are usually many small percpu allocations, many of them
31  * as small as 4 bytes.  The allocator organizes chunks into lists
32  * according to free size and tries to allocate from the fullest one.
33  * Each chunk keeps the maximum contiguous area size hint which is
34  * guaranteed to be equal to or larger than the maximum contiguous
35  * area in the chunk.  This helps the allocator not to iterate the
36  * chunk maps unnecessarily.
37  *
38  * Allocation state in each chunk is kept using an array of integers
39  * on chunk->map.  A positive value in the map represents a free
40  * region and a negative value an allocated one.  Allocation inside
41  * a chunk is done by scanning this map sequentially and serving the
42  * first matching entry.  This is mostly copied from percpu_modalloc().
43  * Chunks can be determined from the address using the index field
44  * in the page struct. The index field contains a pointer to the chunk.
45  *
46  * To use this allocator, arch code should do the following.
47  *
48  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49  *   regular address to percpu pointer and back if they need to be
50  *   different from the default
51  *
52  * - use pcpu_setup_first_chunk() during percpu area initialization to
53  *   setup the first chunk containing the kernel static percpu area
54  */
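
/*
 * An illustration of the chunk->map encoding described above (the sizes
 * below are made up, not taken from a real configuration): a fresh
 * dynamic chunk with a 64k unit starts out as
 *
 *	map[] = {  65536 }			entire unit free
 *
 * and after a 4k allocation followed by a 512 byte allocation becomes
 *
 *	map[] = { -4096, -512, 60928 }		two areas allocated
 *
 * Freeing the 512 byte area merges it back into the trailing free
 * region, giving { -4096, 61440 }.
 */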
55 
56 #include <linux/bitmap.h>
57 #include <linux/bootmem.h>
58 #include <linux/err.h>
59 #include <linux/list.h>
60 #include <linux/log2.h>
61 #include <linux/mm.h>
62 #include <linux/module.h>
63 #include <linux/mutex.h>
64 #include <linux/percpu.h>
65 #include <linux/pfn.h>
66 #include <linux/slab.h>
67 #include <linux/spinlock.h>
68 #include <linux/vmalloc.h>
69 #include <linux/workqueue.h>
70 
71 #include <asm/cacheflush.h>
72 #include <asm/sections.h>
73 #include <asm/tlbflush.h>
74 #include <asm/io.h>
75 
76 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 share the same slot */
77 #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
78 
79 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
80 #ifndef __addr_to_pcpu_ptr
81 #define __addr_to_pcpu_ptr(addr)					\
82 	(void __percpu *)((unsigned long)(addr) -			\
83 			  (unsigned long)pcpu_base_addr	+		\
84 			  (unsigned long)__per_cpu_start)
85 #endif
86 #ifndef __pcpu_ptr_to_addr
87 #define __pcpu_ptr_to_addr(ptr)						\
88 	(void __force *)((unsigned long)(ptr) +				\
89 			 (unsigned long)pcpu_base_addr -		\
90 			 (unsigned long)__per_cpu_start)
91 #endif
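
/*
 * Sketch of how the default translation is used (assuming the default
 * macros above; an arch may override them in asm/percpu.h):
 *
 *	void *addr = chunk->base_addr + off;		address in unit0 space
 *	void __percpu *p = __addr_to_pcpu_ptr(addr);	what callers get back
 *
 * With the generic setup_per_cpu_areas() at the bottom of this file,
 * per_cpu_ptr(p, cpu) then evaluates to addr + pcpu_unit_offsets[cpu],
 * i.e. the same offset within @cpu's unit.
 */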
92 
93 struct pcpu_chunk {
94 	struct list_head	list;		/* linked to pcpu_slot lists */
95 	int			free_size;	/* free bytes in the chunk */
96 	int			contig_hint;	/* max contiguous size hint */
97 	void			*base_addr;	/* base address of this chunk */
98 	int			map_used;	/* # of map entries used */
99 	int			map_alloc;	/* # of map entries allocated */
100 	int			*map;		/* allocation map */
101 	void			*data;		/* chunk data */
102 	bool			immutable;	/* no [de]population allowed */
103 	unsigned long		populated[];	/* populated bitmap */
104 };
105 
106 static int pcpu_unit_pages __read_mostly;
107 static int pcpu_unit_size __read_mostly;
108 static int pcpu_nr_units __read_mostly;
109 static int pcpu_atom_size __read_mostly;
110 static int pcpu_nr_slots __read_mostly;
111 static size_t pcpu_chunk_struct_size __read_mostly;
112 
113 /* cpus with the lowest and highest unit numbers */
114 static unsigned int pcpu_first_unit_cpu __read_mostly;
115 static unsigned int pcpu_last_unit_cpu __read_mostly;
116 
117 /* the address of the first chunk which starts with the kernel static area */
118 void *pcpu_base_addr __read_mostly;
119 EXPORT_SYMBOL_GPL(pcpu_base_addr);
120 
121 static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
122 const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
123 
124 /* group information, used for vm allocation */
125 static int pcpu_nr_groups __read_mostly;
126 static const unsigned long *pcpu_group_offsets __read_mostly;
127 static const size_t *pcpu_group_sizes __read_mostly;
128 
129 /*
130  * The first chunk which always exists.  Note that unlike other
131  * chunks, this one can be allocated and mapped in several different
132  * ways and thus often doesn't live in the vmalloc area.
133  */
134 static struct pcpu_chunk *pcpu_first_chunk;
135 
136 /*
137  * Optional reserved chunk.  This chunk reserves part of the first
138  * chunk and serves it for reserved allocations.  The end offset of
139  * the reserved region is kept in pcpu_reserved_chunk_limit.  When the
140  * reserved area doesn't exist, the following variables contain NULL and 0
141  * respectively.
142  */
143 static struct pcpu_chunk *pcpu_reserved_chunk;
144 static int pcpu_reserved_chunk_limit;
145 
146 /*
147  * Synchronization rules.
148  *
149  * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
150  * protects allocation/reclaim paths, chunks, populated bitmap and
151  * vmalloc mapping.  The latter is a spinlock and protects the index
152  * data structures - chunk slots, chunks and area maps in chunks.
153  *
154  * During allocation, pcpu_alloc_mutex is kept locked all the time and
155  * pcpu_lock is grabbed and released as necessary.  All actual memory
156  * allocations are done using GFP_KERNEL with pcpu_lock released.  In
157  * general, percpu memory can't be allocated with irq off but
158  * irqsave/restore are still used in alloc path so that it can be used
159  * from early init path - sched_init() specifically.
160  *
161  * Free path accesses and alters only the index data structures, so it
162  * can be safely called from atomic context.  When memory needs to be
163  * returned to the system, free path schedules reclaim_work which
164  * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
165  * reclaimed, releases both locks and frees the chunks.  Note that it's
166  * necessary to grab both locks to remove a chunk from circulation as
167  * allocation path might be referencing the chunk with only
168  * pcpu_alloc_mutex locked.
169  */
170 static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
171 static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
172 
173 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
174 
175 /* reclaim work to release fully free chunks, scheduled from free path */
176 static void pcpu_reclaim(struct work_struct *work);
177 static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
178 
179 static bool pcpu_addr_in_first_chunk(void *addr)
180 {
181 	void *first_start = pcpu_first_chunk->base_addr;
182 
183 	return addr >= first_start && addr < first_start + pcpu_unit_size;
184 }
185 
186 static bool pcpu_addr_in_reserved_chunk(void *addr)
187 {
188 	void *first_start = pcpu_first_chunk->base_addr;
189 
190 	return addr >= first_start &&
191 		addr < first_start + pcpu_reserved_chunk_limit;
192 }
193 
194 static int __pcpu_size_to_slot(int size)
195 {
196 	int highbit = fls(size);	/* size is in bytes */
197 	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
198 }
199 
200 static int pcpu_size_to_slot(int size)
201 {
202 	if (size == pcpu_unit_size)
203 		return pcpu_nr_slots - 1;
204 	return __pcpu_size_to_slot(size);
205 }
206 
207 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
208 {
209 	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
210 		return 0;
211 
212 	return pcpu_size_to_slot(chunk->free_size);
213 }
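
/*
 * Worked example of the slot mapping above with the
 * PCPU_SLOT_BASE_SHIFT of 5 defined earlier: a size of 4 bytes maps to
 * slot 1 (fls(4) = 3, 3 - 5 + 2 = 0, clamped to 1), 1024 bytes maps to
 * slot 8 (fls(1024) = 11, 11 - 5 + 2 = 8), and a completely free chunk
 * (free_size == pcpu_unit_size) always ends up in the last slot,
 * pcpu_nr_slots - 1.
 */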
214 
215 /* set the pointer to a chunk in a page struct */
216 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
217 {
218 	page->index = (unsigned long)pcpu;
219 }
220 
221 /* obtain pointer to a chunk from a page struct */
222 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
223 {
224 	return (struct pcpu_chunk *)page->index;
225 }
226 
227 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
228 {
229 	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
230 }
231 
232 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
233 				     unsigned int cpu, int page_idx)
234 {
235 	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
236 		(page_idx << PAGE_SHIFT);
237 }
238 
239 static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
240 					   int *rs, int *re, int end)
241 {
242 	*rs = find_next_zero_bit(chunk->populated, end, *rs);
243 	*re = find_next_bit(chunk->populated, end, *rs + 1);
244 }
245 
246 static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
247 					 int *rs, int *re, int end)
248 {
249 	*rs = find_next_bit(chunk->populated, end, *rs);
250 	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
251 }
252 
253 /*
254  * (Un)populated page region iterators.  Iterate over (un)populated
255  * page regions between @start and @end in @chunk.  @rs and @re should
256  * be integer variables and will be set to start and end page index of
257  * the current region.
258  */
259 #define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
260 	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
261 	     (rs) < (re);						    \
262 	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
263 
264 #define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
265 	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
266 	     (rs) < (re);						    \
267 	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
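
/*
 * A minimal usage sketch of the iterators above (mirroring how the
 * chunk [de]population code uses them):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		populate_region(chunk, rs, re);
 *
 * where populate_region() is a hypothetical stand-in for whatever
 * per-region work the caller needs to do on pages [rs, re) of every
 * unit in @chunk.
 */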
268 
269 /**
270  * pcpu_mem_alloc - allocate memory
271  * @size: bytes to allocate
272  *
273  * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
274  * kzalloc() is used; otherwise, vmalloc() is used.  The returned
275  * memory is always zeroed.
276  *
277  * CONTEXT:
278  * Does GFP_KERNEL allocation.
279  *
280  * RETURNS:
281  * Pointer to the allocated area on success, NULL on failure.
282  */
283 static void *pcpu_mem_alloc(size_t size)
284 {
285 	if (WARN_ON_ONCE(!slab_is_available()))
286 		return NULL;
287 
288 	if (size <= PAGE_SIZE)
289 		return kzalloc(size, GFP_KERNEL);
290 	else {
291 		void *ptr = vmalloc(size);
292 		if (ptr)
293 			memset(ptr, 0, size);
294 		return ptr;
295 	}
296 }
297 
298 /**
299  * pcpu_mem_free - free memory
300  * @ptr: memory to free
301  * @size: size of the area
302  *
303  * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
304  */
305 static void pcpu_mem_free(void *ptr, size_t size)
306 {
307 	if (size <= PAGE_SIZE)
308 		kfree(ptr);
309 	else
310 		vfree(ptr);
311 }
312 
313 /**
314  * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
315  * @chunk: chunk of interest
316  * @oslot: the previous slot it was on
317  *
318  * This function is called after an allocation or free changed @chunk.
319  * New slot according to the changed state is determined and @chunk is
320  * moved to the slot.  Note that the reserved chunk is never put on
321  * chunk slots.
322  *
323  * CONTEXT:
324  * pcpu_lock.
325  */
326 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
327 {
328 	int nslot = pcpu_chunk_slot(chunk);
329 
330 	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
331 		if (oslot < nslot)
332 			list_move(&chunk->list, &pcpu_slot[nslot]);
333 		else
334 			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
335 	}
336 }
337 
338 /**
339  * pcpu_need_to_extend - determine whether chunk area map needs to be extended
340  * @chunk: chunk of interest
341  *
342  * Determine whether area map of @chunk needs to be extended to
343  * accommodate a new allocation.
344  *
345  * CONTEXT:
346  * pcpu_lock.
347  *
348  * RETURNS:
349  * New target map allocation length if extension is necessary, 0
350  * otherwise.
351  */
352 static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
353 {
354 	int new_alloc;
355 
356 	if (chunk->map_alloc >= chunk->map_used + 2)
357 		return 0;
358 
359 	new_alloc = PCPU_DFL_MAP_ALLOC;
360 	while (new_alloc < chunk->map_used + 2)
361 		new_alloc *= 2;
362 
363 	return new_alloc;
364 }
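
/*
 * Example: with map_alloc == 16 and map_used == 15, the map can no
 * longer guarantee room for the up to two extra entries a single
 * allocation may add (16 < 15 + 2), so pcpu_need_to_extend() returns
 * the next size reached by doubling PCPU_DFL_MAP_ALLOC that fits,
 * which is 32 here.
 */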
365 
366 /**
367  * pcpu_extend_area_map - extend area map of a chunk
368  * @chunk: chunk of interest
369  * @new_alloc: new target allocation length of the area map
370  *
371  * Extend area map of @chunk to have @new_alloc entries.
372  *
373  * CONTEXT:
374  * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
375  *
376  * RETURNS:
377  * 0 on success, -errno on failure.
378  */
379 static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
380 {
381 	int *old = NULL, *new = NULL;
382 	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
383 	unsigned long flags;
384 
385 	new = pcpu_mem_alloc(new_size);
386 	if (!new)
387 		return -ENOMEM;
388 
389 	/* acquire pcpu_lock and switch to new area map */
390 	spin_lock_irqsave(&pcpu_lock, flags);
391 
392 	if (new_alloc <= chunk->map_alloc)
393 		goto out_unlock;
394 
395 	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
396 	memcpy(new, chunk->map, old_size);
397 
398 	chunk->map_alloc = new_alloc;
399 	chunk->map = new;
400 	new = NULL;
401 
402 out_unlock:
403 	spin_unlock_irqrestore(&pcpu_lock, flags);
404 
405 	/*
406 	 * pcpu_mem_free() might end up calling vfree() which uses
407 	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
408 	 */
409 	pcpu_mem_free(old, old_size);
410 	pcpu_mem_free(new, new_size);
411 
412 	return 0;
413 }
414 
415 /**
416  * pcpu_split_block - split a map block
417  * @chunk: chunk of interest
418  * @i: index of map block to split
419  * @head: head size in bytes (can be 0)
420  * @tail: tail size in bytes (can be 0)
421  *
422  * Split the @i'th map block into two or three blocks.  If @head is
423  * non-zero, @head bytes block is inserted before block @i moving it
424  * to @i+1 and reducing its size by @head bytes.
425  *
426  * If @tail is non-zero, the target block, which can be @i or @i+1
427  * depending on @head, is reduced by @tail bytes and @tail byte block
428  * is inserted after the target block.
429  *
430  * @chunk->map must have enough free slots to accommodate the split.
431  *
432  * CONTEXT:
433  * pcpu_lock.
434  */
435 static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
436 			     int head, int tail)
437 {
438 	int nr_extra = !!head + !!tail;
439 
440 	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
441 
442 	/* insert new subblocks */
443 	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
444 		sizeof(chunk->map[0]) * (chunk->map_used - i));
445 	chunk->map_used += nr_extra;
446 
447 	if (head) {
448 		chunk->map[i + 1] = chunk->map[i] - head;
449 		chunk->map[i++] = head;
450 	}
451 	if (tail) {
452 		chunk->map[i++] -= tail;
453 		chunk->map[i] = tail;
454 	}
455 }
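
/*
 * Illustration (made-up sizes): splitting a 1024 byte free block at
 * index @i with head == 8 and tail == 504 turns the single entry
 *
 *	..., 1024, ...
 *
 * into the three entries
 *
 *	..., 8, 512, 504, ...
 *
 * where the middle entry is the block the caller is about to hand out
 * (pcpu_alloc_area() negates it afterwards to mark it allocated).
 */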
456 
457 /**
458  * pcpu_alloc_area - allocate area from a pcpu_chunk
459  * @chunk: chunk of interest
460  * @size: wanted size in bytes
461  * @align: wanted align
462  *
463  * Try to allocate @size bytes area aligned at @align from @chunk.
464  * Note that this function only allocates the offset.  It doesn't
465  * populate or map the area.
466  *
467  * @chunk->map must have at least two free slots.
468  *
469  * CONTEXT:
470  * pcpu_lock.
471  *
472  * RETURNS:
473  * Allocated offset in @chunk on success, -1 if no matching area is
474  * found.
475  */
476 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
477 {
478 	int oslot = pcpu_chunk_slot(chunk);
479 	int max_contig = 0;
480 	int i, off;
481 
482 	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
483 		bool is_last = i + 1 == chunk->map_used;
484 		int head, tail;
485 
486 		/* extra for alignment requirement */
487 		head = ALIGN(off, align) - off;
488 		BUG_ON(i == 0 && head != 0);
489 
490 		if (chunk->map[i] < 0)
491 			continue;
492 		if (chunk->map[i] < head + size) {
493 			max_contig = max(chunk->map[i], max_contig);
494 			continue;
495 		}
496 
497 		/*
498 		 * If head is small or the previous block is free,
499 		 * merge'em.  Note that 'small' is defined as smaller
500 		 * than sizeof(int), which is very small but isn't too
501 		 * uncommon for percpu allocations.
502 		 */
503 		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
504 			if (chunk->map[i - 1] > 0)
505 				chunk->map[i - 1] += head;
506 			else {
507 				chunk->map[i - 1] -= head;
508 				chunk->free_size -= head;
509 			}
510 			chunk->map[i] -= head;
511 			off += head;
512 			head = 0;
513 		}
514 
515 		/* if tail is small, just keep it around */
516 		tail = chunk->map[i] - head - size;
517 		if (tail < sizeof(int))
518 			tail = 0;
519 
520 		/* split if warranted */
521 		if (head || tail) {
522 			pcpu_split_block(chunk, i, head, tail);
523 			if (head) {
524 				i++;
525 				off += head;
526 				max_contig = max(chunk->map[i - 1], max_contig);
527 			}
528 			if (tail)
529 				max_contig = max(chunk->map[i + 1], max_contig);
530 		}
531 
532 		/* update hint and mark allocated */
533 		if (is_last)
534 			chunk->contig_hint = max_contig; /* fully scanned */
535 		else
536 			chunk->contig_hint = max(chunk->contig_hint,
537 						 max_contig);
538 
539 		chunk->free_size -= chunk->map[i];
540 		chunk->map[i] = -chunk->map[i];
541 
542 		pcpu_chunk_relocate(chunk, oslot);
543 		return off;
544 	}
545 
546 	chunk->contig_hint = max_contig;	/* fully scanned */
547 	pcpu_chunk_relocate(chunk, oslot);
548 
549 	/* tell the upper layer that this chunk has no matching area */
550 	return -1;
551 }
552 
553 /**
554  * pcpu_free_area - free area to a pcpu_chunk
555  * @chunk: chunk of interest
556  * @freeme: offset of area to free
557  *
558  * Free the area at offset @freeme in @chunk.  Note that this function
559  * only modifies the allocation map.  It doesn't depopulate or unmap
560  * the area.
561  *
562  * CONTEXT:
563  * pcpu_lock.
564  */
565 static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
566 {
567 	int oslot = pcpu_chunk_slot(chunk);
568 	int i, off;
569 
570 	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
571 		if (off == freeme)
572 			break;
573 	BUG_ON(off != freeme);
574 	BUG_ON(chunk->map[i] > 0);
575 
576 	chunk->map[i] = -chunk->map[i];
577 	chunk->free_size += chunk->map[i];
578 
579 	/* merge with previous? */
580 	if (i > 0 && chunk->map[i - 1] >= 0) {
581 		chunk->map[i - 1] += chunk->map[i];
582 		chunk->map_used--;
583 		memmove(&chunk->map[i], &chunk->map[i + 1],
584 			(chunk->map_used - i) * sizeof(chunk->map[0]));
585 		i--;
586 	}
587 	/* merge with next? */
588 	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
589 		chunk->map[i] += chunk->map[i + 1];
590 		chunk->map_used--;
591 		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
592 			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
593 	}
594 
595 	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
596 	pcpu_chunk_relocate(chunk, oslot);
597 }
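
/*
 * Merge example (made-up sizes): starting from the map
 *
 *	{ -4096, -512, 60928 }
 *
 * freeing the 512 byte area flips its entry positive and, because the
 * following entry is also free, collapses the two into one, leaving
 *
 *	{ -4096, 61440 }
 *
 * The preceding entry stays untouched since it is still allocated.
 */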
598 
599 static struct pcpu_chunk *pcpu_alloc_chunk(void)
600 {
601 	struct pcpu_chunk *chunk;
602 
603 	chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
604 	if (!chunk)
605 		return NULL;
606 
607 	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
608 	if (!chunk->map) {
609 		kfree(chunk);
610 		return NULL;
611 	}
612 
613 	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
614 	chunk->map[chunk->map_used++] = pcpu_unit_size;
615 
616 	INIT_LIST_HEAD(&chunk->list);
617 	chunk->free_size = pcpu_unit_size;
618 	chunk->contig_hint = pcpu_unit_size;
619 
620 	return chunk;
621 }
622 
623 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
624 {
625 	if (!chunk)
626 		return;
627 	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
628 	kfree(chunk);
629 }
630 
631 /*
632  * Chunk management implementation.
633  *
634  * To allow different implementations, chunk alloc/free and
635  * [de]population are implemented in a separate file which is pulled
636  * into this file and compiled together.  The following functions
637  * should be implemented.
638  *
639  * pcpu_populate_chunk		- populate the specified range of a chunk
640  * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
641  * pcpu_create_chunk		- create a new chunk
642  * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
643  * pcpu_addr_to_page		- translate address to the corresponding struct page
644  * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
645  */
646 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
647 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
648 static struct pcpu_chunk *pcpu_create_chunk(void);
649 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
650 static struct page *pcpu_addr_to_page(void *addr);
651 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
652 
653 #ifdef CONFIG_NEED_PER_CPU_KM
654 #include "percpu-km.c"
655 #else
656 #include "percpu-vm.c"
657 #endif
658 
659 /**
660  * pcpu_chunk_addr_search - determine chunk containing specified address
661  * @addr: address for which the chunk needs to be determined.
662  *
663  * RETURNS:
664  * The address of the found chunk.
665  */
666 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
667 {
668 	/* is it in the first chunk? */
669 	if (pcpu_addr_in_first_chunk(addr)) {
670 		/* is it in the reserved area? */
671 		if (pcpu_addr_in_reserved_chunk(addr))
672 			return pcpu_reserved_chunk;
673 		return pcpu_first_chunk;
674 	}
675 
676 	/*
677 	 * The address is relative to unit0 which might be unused and
678 	 * thus unmapped.  Offset the address to the unit space of the
679 	 * current processor before looking it up in the vmalloc
680 	 * space.  Note that any possible cpu id can be used here, so
681 	 * there's no need to worry about preemption or cpu hotplug.
682 	 */
683 	addr += pcpu_unit_offsets[raw_smp_processor_id()];
684 	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
685 }
686 
687 /**
688  * pcpu_alloc - the percpu allocator
689  * @size: size of area to allocate in bytes
690  * @align: alignment of area (max PAGE_SIZE)
691  * @reserved: allocate from the reserved chunk if available
692  *
693  * Allocate percpu area of @size bytes aligned at @align.
694  *
695  * CONTEXT:
696  * Does GFP_KERNEL allocation.
697  *
698  * RETURNS:
699  * Percpu pointer to the allocated area on success, NULL on failure.
700  */
701 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
702 {
703 	static int warn_limit = 10;
704 	struct pcpu_chunk *chunk;
705 	const char *err;
706 	int slot, off, new_alloc;
707 	unsigned long flags;
708 
709 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
710 		WARN(true, "illegal size (%zu) or align (%zu) for "
711 		     "percpu allocation\n", size, align);
712 		return NULL;
713 	}
714 
715 	mutex_lock(&pcpu_alloc_mutex);
716 	spin_lock_irqsave(&pcpu_lock, flags);
717 
718 	/* serve reserved allocations from the reserved chunk if available */
719 	if (reserved && pcpu_reserved_chunk) {
720 		chunk = pcpu_reserved_chunk;
721 
722 		if (size > chunk->contig_hint) {
723 			err = "alloc from reserved chunk failed";
724 			goto fail_unlock;
725 		}
726 
727 		while ((new_alloc = pcpu_need_to_extend(chunk))) {
728 			spin_unlock_irqrestore(&pcpu_lock, flags);
729 			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
730 				err = "failed to extend area map of reserved chunk";
731 				goto fail_unlock_mutex;
732 			}
733 			spin_lock_irqsave(&pcpu_lock, flags);
734 		}
735 
736 		off = pcpu_alloc_area(chunk, size, align);
737 		if (off >= 0)
738 			goto area_found;
739 
740 		err = "alloc from reserved chunk failed";
741 		goto fail_unlock;
742 	}
743 
744 restart:
745 	/* search through normal chunks */
746 	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
747 		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
748 			if (size > chunk->contig_hint)
749 				continue;
750 
751 			new_alloc = pcpu_need_to_extend(chunk);
752 			if (new_alloc) {
753 				spin_unlock_irqrestore(&pcpu_lock, flags);
754 				if (pcpu_extend_area_map(chunk,
755 							 new_alloc) < 0) {
756 					err = "failed to extend area map";
757 					goto fail_unlock_mutex;
758 				}
759 				spin_lock_irqsave(&pcpu_lock, flags);
760 				/*
761 				 * pcpu_lock has been dropped, need to
762 				 * restart cpu_slot list walking.
763 				 */
764 				goto restart;
765 			}
766 
767 			off = pcpu_alloc_area(chunk, size, align);
768 			if (off >= 0)
769 				goto area_found;
770 		}
771 	}
772 
773 	/* hmmm... no space left, create a new chunk */
774 	spin_unlock_irqrestore(&pcpu_lock, flags);
775 
776 	chunk = pcpu_create_chunk();
777 	if (!chunk) {
778 		err = "failed to allocate new chunk";
779 		goto fail_unlock_mutex;
780 	}
781 
782 	spin_lock_irqsave(&pcpu_lock, flags);
783 	pcpu_chunk_relocate(chunk, -1);
784 	goto restart;
785 
786 area_found:
787 	spin_unlock_irqrestore(&pcpu_lock, flags);
788 
789 	/* populate, map and clear the area */
790 	if (pcpu_populate_chunk(chunk, off, size)) {
791 		spin_lock_irqsave(&pcpu_lock, flags);
792 		pcpu_free_area(chunk, off);
793 		err = "failed to populate";
794 		goto fail_unlock;
795 	}
796 
797 	mutex_unlock(&pcpu_alloc_mutex);
798 
799 	/* return address relative to base address */
800 	return __addr_to_pcpu_ptr(chunk->base_addr + off);
801 
802 fail_unlock:
803 	spin_unlock_irqrestore(&pcpu_lock, flags);
804 fail_unlock_mutex:
805 	mutex_unlock(&pcpu_alloc_mutex);
806 	if (warn_limit) {
807 		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
808 			   "%s\n", size, align, err);
809 		dump_stack();
810 		if (!--warn_limit)
811 			pr_info("PERCPU: limit reached, disable warning\n");
812 	}
813 	return NULL;
814 }
815 
816 /**
817  * __alloc_percpu - allocate dynamic percpu area
818  * @size: size of area to allocate in bytes
819  * @align: alignment of area (max PAGE_SIZE)
820  *
821  * Allocate percpu area of @size bytes aligned at @align.  Might
822  * sleep.  Might trigger writeouts.
823  *
824  * CONTEXT:
825  * Does GFP_KERNEL allocation.
826  *
827  * RETURNS:
828  * Percpu pointer to the allocated area on success, NULL on failure.
829  */
830 void __percpu *__alloc_percpu(size_t size, size_t align)
831 {
832 	return pcpu_alloc(size, align, false);
833 }
834 EXPORT_SYMBOL_GPL(__alloc_percpu);
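
/*
 * Typical use from a hypothetical caller (struct my_stats and the error
 * handling below are illustrative only); alloc_percpu() in
 * linux/percpu.h wraps __alloc_percpu() with the type's size and
 * alignment:
 *
 *	struct my_stats { unsigned long packets; };
 *	struct my_stats __percpu *stats;
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	stats = alloc_percpu(struct my_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu_ptr(stats, cpu)->packets;
 *	free_percpu(stats);
 */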
835 
836 /**
837  * __alloc_reserved_percpu - allocate reserved percpu area
838  * @size: size of area to allocate in bytes
839  * @align: alignment of area (max PAGE_SIZE)
840  *
841  * Allocate percpu area of @size bytes aligned at @align from reserved
842  * percpu area if the arch has set it up; otherwise, allocation is served
843  * from the same dynamic area.  Might sleep.  Might trigger writeouts.
844  *
845  * CONTEXT:
846  * Does GFP_KERNEL allocation.
847  *
848  * RETURNS:
849  * Percpu pointer to the allocated area on success, NULL on failure.
850  */
851 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
852 {
853 	return pcpu_alloc(size, align, true);
854 }
855 
856 /**
857  * pcpu_reclaim - reclaim fully free chunks, workqueue function
858  * @work: unused
859  *
860  * Reclaim all fully free chunks except for the first one.
861  *
862  * CONTEXT:
863  * workqueue context.
864  */
865 static void pcpu_reclaim(struct work_struct *work)
866 {
867 	LIST_HEAD(todo);
868 	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
869 	struct pcpu_chunk *chunk, *next;
870 
871 	mutex_lock(&pcpu_alloc_mutex);
872 	spin_lock_irq(&pcpu_lock);
873 
874 	list_for_each_entry_safe(chunk, next, head, list) {
875 		WARN_ON(chunk->immutable);
876 
877 		/* spare the first one */
878 		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
879 			continue;
880 
881 		list_move(&chunk->list, &todo);
882 	}
883 
884 	spin_unlock_irq(&pcpu_lock);
885 
886 	list_for_each_entry_safe(chunk, next, &todo, list) {
887 		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
888 		pcpu_destroy_chunk(chunk);
889 	}
890 
891 	mutex_unlock(&pcpu_alloc_mutex);
892 }
893 
894 /**
895  * free_percpu - free percpu area
896  * @ptr: pointer to area to free
897  *
898  * Free percpu area @ptr.
899  *
900  * CONTEXT:
901  * Can be called from atomic context.
902  */
903 void free_percpu(void __percpu *ptr)
904 {
905 	void *addr;
906 	struct pcpu_chunk *chunk;
907 	unsigned long flags;
908 	int off;
909 
910 	if (!ptr)
911 		return;
912 
913 	addr = __pcpu_ptr_to_addr(ptr);
914 
915 	spin_lock_irqsave(&pcpu_lock, flags);
916 
917 	chunk = pcpu_chunk_addr_search(addr);
918 	off = addr - chunk->base_addr;
919 
920 	pcpu_free_area(chunk, off);
921 
922 	/* if there is more than one fully free chunk, wake up the grim reaper */
923 	if (chunk->free_size == pcpu_unit_size) {
924 		struct pcpu_chunk *pos;
925 
926 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
927 			if (pos != chunk) {
928 				schedule_work(&pcpu_reclaim_work);
929 				break;
930 			}
931 	}
932 
933 	spin_unlock_irqrestore(&pcpu_lock, flags);
934 }
935 EXPORT_SYMBOL_GPL(free_percpu);
936 
937 /**
938  * is_kernel_percpu_address - test whether address is from static percpu area
939  * @addr: address to test
940  *
941  * Test whether @addr belongs to the in-kernel static percpu area.  Module
942  * static percpu areas are not considered.  For those, use
943  * is_module_percpu_address().
944  *
945  * RETURNS:
946  * %true if @addr is from in-kernel static percpu area, %false otherwise.
947  */
948 bool is_kernel_percpu_address(unsigned long addr)
949 {
950 	const size_t static_size = __per_cpu_end - __per_cpu_start;
951 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
952 	unsigned int cpu;
953 
954 	for_each_possible_cpu(cpu) {
955 		void *start = per_cpu_ptr(base, cpu);
956 
957 		if ((void *)addr >= start && (void *)addr < start + static_size)
958 			return true;
959 	}
960 	return false;
961 }
962 
963 /**
964  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
965  * @addr: the address to be converted to physical address
966  *
967  * Given @addr which is a dereferenceable address obtained via one of
968  * percpu access macros, this function translates it into its physical
969  * address.  The caller is responsible for ensuring @addr stays valid
970  * until this function finishes.
971  *
972  * RETURNS:
973  * The physical address for @addr.
974  */
975 phys_addr_t per_cpu_ptr_to_phys(void *addr)
976 {
977 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
978 	bool in_first_chunk = false;
979 	unsigned long first_start, first_end;
980 	unsigned int cpu;
981 
982 	/*
983 	 * The following test on first_start/end isn't strictly
984 	 * necessary but will speed up lookups of addresses which
985 	 * aren't in the first chunk.
986 	 */
987 	first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
988 	first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
989 				    pcpu_unit_pages);
990 	if ((unsigned long)addr >= first_start &&
991 	    (unsigned long)addr < first_end) {
992 		for_each_possible_cpu(cpu) {
993 			void *start = per_cpu_ptr(base, cpu);
994 
995 			if (addr >= start && addr < start + pcpu_unit_size) {
996 				in_first_chunk = true;
997 				break;
998 			}
999 		}
1000 	}
1001 
1002 	if (in_first_chunk) {
1003 		if ((unsigned long)addr < VMALLOC_START ||
1004 		    (unsigned long)addr >= VMALLOC_END)
1005 			return __pa(addr);
1006 		else
1007 			return page_to_phys(vmalloc_to_page(addr));
1008 	} else
1009 		return page_to_phys(pcpu_addr_to_page(addr));
1010 }
1011 
1012 /**
1013  * pcpu_alloc_alloc_info - allocate percpu allocation info
1014  * @nr_groups: the number of groups
1015  * @nr_units: the number of units
1016  *
1017  * Allocate ai which is large enough for @nr_groups groups containing
1018  * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1019  * cpu_map array which is long enough for @nr_units and filled with
1020  * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
1021  * pointer of other groups.
1022  *
1023  * RETURNS:
1024  * Pointer to the allocated pcpu_alloc_info on success, NULL on
1025  * failure.
1026  */
1027 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1028 						      int nr_units)
1029 {
1030 	struct pcpu_alloc_info *ai;
1031 	size_t base_size, ai_size;
1032 	void *ptr;
1033 	int unit;
1034 
1035 	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1036 			  __alignof__(ai->groups[0].cpu_map[0]));
1037 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1038 
1039 	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1040 	if (!ptr)
1041 		return NULL;
1042 	ai = ptr;
1043 	ptr += base_size;
1044 
1045 	ai->groups[0].cpu_map = ptr;
1046 
1047 	for (unit = 0; unit < nr_units; unit++)
1048 		ai->groups[0].cpu_map[unit] = NR_CPUS;
1049 
1050 	ai->nr_groups = nr_groups;
1051 	ai->__ai_size = PFN_ALIGN(ai_size);
1052 
1053 	return ai;
1054 }
1055 
1056 /**
1057  * pcpu_free_alloc_info - free percpu allocation info
1058  * @ai: pcpu_alloc_info to free
1059  *
1060  * Free @ai which was allocated by pcpu_alloc_alloc_info().
1061  */
1062 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1063 {
1064 	free_bootmem(__pa(ai), ai->__ai_size);
1065 }
1066 
1067 /**
1068  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1069  * @reserved_size: the size of reserved percpu area in bytes
1070  * @dyn_size: minimum free size for dynamic allocation in bytes
1071  * @atom_size: allocation atom size
1072  * @cpu_distance_fn: callback to determine distance between cpus, optional
1073  *
1074  * This function determines grouping of units, their mappings to cpus
1075  * and other parameters considering needed percpu size, allocation
1076  * atom size and distances between CPUs.
1077  *
1078  * Groups are always multiples of atom size and CPUs which are of
1079  * LOCAL_DISTANCE both ways are grouped together and share space for
1080  * units in the same group.  The returned configuration is guaranteed
1081  * to have CPUs on different nodes in different groups and >=75% usage
1082  * of allocated virtual address space.
1083  *
1084  * RETURNS:
1085  * On success, pointer to the new allocation_info is returned.  On
1086  * failure, ERR_PTR value is returned.
1087  */
1088 static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1089 				size_t reserved_size, size_t dyn_size,
1090 				size_t atom_size,
1091 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1092 {
1093 	static int group_map[NR_CPUS] __initdata;
1094 	static int group_cnt[NR_CPUS] __initdata;
1095 	const size_t static_size = __per_cpu_end - __per_cpu_start;
1096 	int nr_groups = 1, nr_units = 0;
1097 	size_t size_sum, min_unit_size, alloc_size;
1098 	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1099 	int last_allocs, group, unit;
1100 	unsigned int cpu, tcpu;
1101 	struct pcpu_alloc_info *ai;
1102 	unsigned int *cpu_map;
1103 
1104 	/* this function may be called multiple times */
1105 	memset(group_map, 0, sizeof(group_map));
1106 	memset(group_cnt, 0, sizeof(group_cnt));
1107 
1108 	/* calculate size_sum and ensure dyn_size is enough for early alloc */
1109 	size_sum = PFN_ALIGN(static_size + reserved_size +
1110 			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1111 	dyn_size = size_sum - static_size - reserved_size;
1112 
1113 	/*
1114 	 * Determine min_unit_size, alloc_size and max_upa such that
1115 	 * alloc_size is multiple of atom_size and is the smallest
1116  * which can accommodate 4k aligned segments which are equal to
1117 	 * or larger than min_unit_size.
1118 	 */
1119 	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1120 
1121 	alloc_size = roundup(min_unit_size, atom_size);
1122 	upa = alloc_size / min_unit_size;
1123 	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1124 		upa--;
1125 	max_upa = upa;
1126 
1127 	/* group cpus according to their proximity */
1128 	for_each_possible_cpu(cpu) {
1129 		group = 0;
1130 	next_group:
1131 		for_each_possible_cpu(tcpu) {
1132 			if (cpu == tcpu)
1133 				break;
1134 			if (group_map[tcpu] == group && cpu_distance_fn &&
1135 			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1136 			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1137 				group++;
1138 				nr_groups = max(nr_groups, group + 1);
1139 				goto next_group;
1140 			}
1141 		}
1142 		group_map[cpu] = group;
1143 		group_cnt[group]++;
1144 	}
1145 
1146 	/*
1147 	 * Expand unit size until address space usage goes over 75%
1148 	 * and then as much as possible without using more address
1149 	 * space.
1150 	 */
1151 	last_allocs = INT_MAX;
1152 	for (upa = max_upa; upa; upa--) {
1153 		int allocs = 0, wasted = 0;
1154 
1155 		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1156 			continue;
1157 
1158 		for (group = 0; group < nr_groups; group++) {
1159 			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1160 			allocs += this_allocs;
1161 			wasted += this_allocs * upa - group_cnt[group];
1162 		}
1163 
1164 		/*
1165 		 * Don't accept if wastage is over 25%.  The
1166 		 * greater-than comparison ensures upa==1 always
1167 		 * passes the following check.
1168 		 */
1169 		if (wasted > num_possible_cpus() / 3)
1170 			continue;
1171 
1172 		/* and then don't consume more memory */
1173 		if (allocs > last_allocs)
1174 			break;
1175 		last_allocs = allocs;
1176 		best_upa = upa;
1177 	}
1178 	upa = best_upa;
1179 
1180 	/* allocate and fill alloc_info */
1181 	for (group = 0; group < nr_groups; group++)
1182 		nr_units += roundup(group_cnt[group], upa);
1183 
1184 	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1185 	if (!ai)
1186 		return ERR_PTR(-ENOMEM);
1187 	cpu_map = ai->groups[0].cpu_map;
1188 
1189 	for (group = 0; group < nr_groups; group++) {
1190 		ai->groups[group].cpu_map = cpu_map;
1191 		cpu_map += roundup(group_cnt[group], upa);
1192 	}
1193 
1194 	ai->static_size = static_size;
1195 	ai->reserved_size = reserved_size;
1196 	ai->dyn_size = dyn_size;
1197 	ai->unit_size = alloc_size / upa;
1198 	ai->atom_size = atom_size;
1199 	ai->alloc_size = alloc_size;
1200 
1201 	for (group = 0, unit = 0; group_cnt[group]; group++) {
1202 		struct pcpu_group_info *gi = &ai->groups[group];
1203 
1204 		/*
1205 		 * Initialize base_offset as if all groups are located
1206 		 * back-to-back.  The caller should update this to
1207 		 * reflect actual allocation.
1208 		 */
1209 		gi->base_offset = unit * ai->unit_size;
1210 
1211 		for_each_possible_cpu(cpu)
1212 			if (group_map[cpu] == group)
1213 				gi->cpu_map[gi->nr_units++] = cpu;
1214 		gi->nr_units = roundup(gi->nr_units, upa);
1215 		unit += gi->nr_units;
1216 	}
1217 	BUG_ON(unit != nr_units);
1218 
1219 	return ai;
1220 }
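
/*
 * Worked example of the unit sizing above, with made-up numbers: with
 * size_sum of 44k (11 pages) and a 2M allocation atom, min_unit_size
 * is 44k and alloc_size is rounded up to 2M.  The initial upa of
 * 2M / 44k = 46 is then decreased until each unit is page aligned and
 * divides the allocation evenly, which first happens at upa = 32,
 * giving a 64k unit size and 32 units per 2M allocation.
 */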
1221 
1222 /**
1223  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1224  * @lvl: loglevel
1225  * @ai: allocation info to dump
1226  *
1227  * Print out information about @ai using loglevel @lvl.
1228  */
1229 static void pcpu_dump_alloc_info(const char *lvl,
1230 				 const struct pcpu_alloc_info *ai)
1231 {
1232 	int group_width = 1, cpu_width = 1, width;
1233 	char empty_str[] = "--------";
1234 	int alloc = 0, alloc_end = 0;
1235 	int group, v;
1236 	int upa, apl;	/* units per alloc, allocs per line */
1237 
1238 	v = ai->nr_groups;
1239 	while (v /= 10)
1240 		group_width++;
1241 
1242 	v = num_possible_cpus();
1243 	while (v /= 10)
1244 		cpu_width++;
1245 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1246 
1247 	upa = ai->alloc_size / ai->unit_size;
1248 	width = upa * (cpu_width + 1) + group_width + 3;
1249 	apl = rounddown_pow_of_two(max(60 / width, 1));
1250 
1251 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1252 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1253 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1254 
1255 	for (group = 0; group < ai->nr_groups; group++) {
1256 		const struct pcpu_group_info *gi = &ai->groups[group];
1257 		int unit = 0, unit_end = 0;
1258 
1259 		BUG_ON(gi->nr_units % upa);
1260 		for (alloc_end += gi->nr_units / upa;
1261 		     alloc < alloc_end; alloc++) {
1262 			if (!(alloc % apl)) {
1263 				printk("\n");
1264 				printk("%spcpu-alloc: ", lvl);
1265 			}
1266 			printk("[%0*d] ", group_width, group);
1267 
1268 			for (unit_end += upa; unit < unit_end; unit++)
1269 				if (gi->cpu_map[unit] != NR_CPUS)
1270 					printk("%0*d ", cpu_width,
1271 					       gi->cpu_map[unit]);
1272 				else
1273 					printk("%s ", empty_str);
1274 		}
1275 	}
1276 	printk("\n");
1277 }
1278 
1279 /**
1280  * pcpu_setup_first_chunk - initialize the first percpu chunk
1281  * @ai: pcpu_alloc_info describing how the percpu area is shaped
1282  * @base_addr: mapped address
1283  *
1284  * Initialize the first percpu chunk which contains the kernel static
1285  * percpu area.  This function is to be called from arch percpu area
1286  * setup path.
1287  *
1288  * @ai contains all information necessary to initialize the first
1289  * chunk and prime the dynamic percpu allocator.
1290  *
1291  * @ai->static_size is the size of static percpu area.
1292  *
1293  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1294  * reserve after the static area in the first chunk.  This reserves
1295  * the first chunk such that it's available only through reserved
1296  * percpu allocation.  This is primarily used to serve module percpu
1297  * static areas on architectures where the addressing model has
1298  * limited offset range for symbol relocations to guarantee module
1299  * percpu symbols fall inside the relocatable range.
1300  *
1301  * @ai->dyn_size determines the number of bytes available for dynamic
1302  * allocation in the first chunk.  The area between @ai->static_size +
1303  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1304  *
1305  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1306  * and equal to or larger than @ai->static_size + @ai->reserved_size +
1307  * @ai->dyn_size.
1308  *
1309  * @ai->atom_size is the allocation atom size and used as alignment
1310  * for vm areas.
1311  *
1312  * @ai->alloc_size is the allocation size and always multiple of
1313  * @ai->atom_size.  This is larger than @ai->atom_size if
1314  * @ai->unit_size is larger than @ai->atom_size.
1315  *
1316  * @ai->nr_groups and @ai->groups describe virtual memory layout of
1317  * percpu areas.  Units which should be colocated are put into the
1318  * same group.  Dynamic VM areas will be allocated according to these
1319  * groupings.  If @ai->nr_groups is zero, a single group containing
1320  * all units is assumed.
1321  *
1322  * The caller should have mapped the first chunk at @base_addr and
1323  * copied static data to each unit.
1324  *
1325  * If the first chunk ends up with both reserved and dynamic areas, it
1326  * is served by two chunks - one to serve the core static and reserved
1327  * areas and the other for the dynamic area.  They share the same vm
1328  * and page map but use different area allocation maps to stay away
1329  * from each other.  The latter chunk is circulated in the chunk slots
1330  * and available for dynamic allocation like any other chunk.
1331  *
1332  * RETURNS:
1333  * 0 on success, -errno on failure.
1334  */
1335 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1336 				  void *base_addr)
1337 {
1338 	static char cpus_buf[4096] __initdata;
1339 	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1340 	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1341 	size_t dyn_size = ai->dyn_size;
1342 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1343 	struct pcpu_chunk *schunk, *dchunk = NULL;
1344 	unsigned long *group_offsets;
1345 	size_t *group_sizes;
1346 	unsigned long *unit_off;
1347 	unsigned int cpu;
1348 	int *unit_map;
1349 	int group, unit, i;
1350 
1351 	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1352 
1353 #define PCPU_SETUP_BUG_ON(cond)	do {					\
1354 	if (unlikely(cond)) {						\
1355 		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
1356 		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
1357 		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1358 		BUG();							\
1359 	}								\
1360 } while (0)
1361 
1362 	/* sanity checks */
1363 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1364 	PCPU_SETUP_BUG_ON(!ai->static_size);
1365 	PCPU_SETUP_BUG_ON(!base_addr);
1366 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1367 	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1368 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1369 	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1370 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1371 
1372 	/* process group information and build config tables accordingly */
1373 	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1374 	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1375 	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1376 	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1377 
1378 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1379 		unit_map[cpu] = UINT_MAX;
1380 	pcpu_first_unit_cpu = NR_CPUS;
1381 
1382 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1383 		const struct pcpu_group_info *gi = &ai->groups[group];
1384 
1385 		group_offsets[group] = gi->base_offset;
1386 		group_sizes[group] = gi->nr_units * ai->unit_size;
1387 
1388 		for (i = 0; i < gi->nr_units; i++) {
1389 			cpu = gi->cpu_map[i];
1390 			if (cpu == NR_CPUS)
1391 				continue;
1392 
1393 			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1394 			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1395 			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1396 
1397 			unit_map[cpu] = unit + i;
1398 			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1399 
1400 			if (pcpu_first_unit_cpu == NR_CPUS)
1401 				pcpu_first_unit_cpu = cpu;
1402 		}
1403 	}
1404 	pcpu_last_unit_cpu = cpu;
1405 	pcpu_nr_units = unit;
1406 
1407 	for_each_possible_cpu(cpu)
1408 		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1409 
1410 	/* we're done parsing the input, undefine BUG macro and dump config */
1411 #undef PCPU_SETUP_BUG_ON
1412 	pcpu_dump_alloc_info(KERN_INFO, ai);
1413 
1414 	pcpu_nr_groups = ai->nr_groups;
1415 	pcpu_group_offsets = group_offsets;
1416 	pcpu_group_sizes = group_sizes;
1417 	pcpu_unit_map = unit_map;
1418 	pcpu_unit_offsets = unit_off;
1419 
1420 	/* determine basic parameters */
1421 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1422 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1423 	pcpu_atom_size = ai->atom_size;
1424 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1425 		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1426 
1427 	/*
1428 	 * Allocate chunk slots.  The additional last slot is for
1429 	 * empty chunks.
1430 	 */
1431 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1432 	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1433 	for (i = 0; i < pcpu_nr_slots; i++)
1434 		INIT_LIST_HEAD(&pcpu_slot[i]);
1435 
1436 	/*
1437 	 * Initialize static chunk.  If reserved_size is zero, the
1438 	 * static chunk covers static area + dynamic allocation area
1439 	 * in the first chunk.  If reserved_size is not zero, it
1440 	 * covers static area + reserved area (mostly used for module
1441 	 * static percpu allocation).
1442 	 */
1443 	schunk = alloc_bootmem(pcpu_chunk_struct_size);
1444 	INIT_LIST_HEAD(&schunk->list);
1445 	schunk->base_addr = base_addr;
1446 	schunk->map = smap;
1447 	schunk->map_alloc = ARRAY_SIZE(smap);
1448 	schunk->immutable = true;
1449 	bitmap_fill(schunk->populated, pcpu_unit_pages);
1450 
1451 	if (ai->reserved_size) {
1452 		schunk->free_size = ai->reserved_size;
1453 		pcpu_reserved_chunk = schunk;
1454 		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1455 	} else {
1456 		schunk->free_size = dyn_size;
1457 		dyn_size = 0;			/* dynamic area covered */
1458 	}
1459 	schunk->contig_hint = schunk->free_size;
1460 
1461 	schunk->map[schunk->map_used++] = -ai->static_size;
1462 	if (schunk->free_size)
1463 		schunk->map[schunk->map_used++] = schunk->free_size;
1464 
1465 	/* init dynamic chunk if necessary */
1466 	if (dyn_size) {
1467 		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1468 		INIT_LIST_HEAD(&dchunk->list);
1469 		dchunk->base_addr = base_addr;
1470 		dchunk->map = dmap;
1471 		dchunk->map_alloc = ARRAY_SIZE(dmap);
1472 		dchunk->immutable = true;
1473 		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1474 
1475 		dchunk->contig_hint = dchunk->free_size = dyn_size;
1476 		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1477 		dchunk->map[dchunk->map_used++] = dchunk->free_size;
1478 	}
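
	/*
	 * At this point, with a non-zero reserved_size, the two maps are
	 * (schematically)
	 *
	 *	smap = { -static_size, reserved_size }
	 *	dmap = { -(static_size + reserved_size), dyn_size }
	 *
	 * i.e. the static chunk hands out only the reserved region while
	 * the dynamic chunk sees everything before the dynamic area as
	 * already allocated.
	 */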
1479 
1480 	/* link the first chunk in */
1481 	pcpu_first_chunk = dchunk ?: schunk;
1482 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1483 
1484 	/* we're done */
1485 	pcpu_base_addr = base_addr;
1486 	return 0;
1487 }
1488 
1489 const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1490 	[PCPU_FC_AUTO]	= "auto",
1491 	[PCPU_FC_EMBED]	= "embed",
1492 	[PCPU_FC_PAGE]	= "page",
1493 };
1494 
1495 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1496 
1497 static int __init percpu_alloc_setup(char *str)
1498 {
1499 	if (0)
1500 		/* nada */;
1501 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1502 	else if (!strcmp(str, "embed"))
1503 		pcpu_chosen_fc = PCPU_FC_EMBED;
1504 #endif
1505 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1506 	else if (!strcmp(str, "page"))
1507 		pcpu_chosen_fc = PCPU_FC_PAGE;
1508 #endif
1509 	else
1510 		pr_warning("PERCPU: unknown allocator %s specified\n", str);
1511 
1512 	return 0;
1513 }
1514 early_param("percpu_alloc", percpu_alloc_setup);
1515 
1516 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1517 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1518 /**
1519  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1520  * @reserved_size: the size of reserved percpu area in bytes
1521  * @dyn_size: minimum free size for dynamic allocation in bytes
1522  * @atom_size: allocation atom size
1523  * @cpu_distance_fn: callback to determine distance between cpus, optional
1524  * @alloc_fn: function to allocate percpu page
1525  * @free_fn: function to free percpu page
1526  *
1527  * This is a helper to ease setting up embedded first percpu chunk and
1528  * can be called where pcpu_setup_first_chunk() is expected.
1529  *
1530  * If this function is used to set up the first chunk, it is allocated
1531  * by calling @alloc_fn and used as-is without being mapped into
1532  * vmalloc area.  Allocations are always whole multiples of @atom_size
1533  * aligned to @atom_size.
1534  *
1535  * This enables the first chunk to piggy back on the linear physical
1536  * mapping which often uses larger page size.  Please note that this
1537  * can result in very sparse cpu->unit mapping on NUMA machines thus
1538  * requiring large vmalloc address space.  Don't use this allocator if
1539  * vmalloc space is not orders of magnitude larger than distances
1540  * between node memory addresses (i.e. 32bit NUMA machines).
1541  *
1542  * @dyn_size specifies the minimum dynamic area size.
1543  *
1544  * If the needed size is smaller than the minimum or specified unit
1545  * size, the leftover is returned using @free_fn.
1546  *
1547  * RETURNS:
1548  * 0 on success, -errno on failure.
1549  */
1550 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1551 				  size_t atom_size,
1552 				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1553 				  pcpu_fc_alloc_fn_t alloc_fn,
1554 				  pcpu_fc_free_fn_t free_fn)
1555 {
1556 	void *base = (void *)ULONG_MAX;
1557 	void **areas = NULL;
1558 	struct pcpu_alloc_info *ai;
1559 	size_t size_sum, areas_size, max_distance;
1560 	int group, i, rc;
1561 
1562 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1563 				   cpu_distance_fn);
1564 	if (IS_ERR(ai))
1565 		return PTR_ERR(ai);
1566 
1567 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1568 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1569 
1570 	areas = alloc_bootmem_nopanic(areas_size);
1571 	if (!areas) {
1572 		rc = -ENOMEM;
1573 		goto out_free;
1574 	}
1575 
1576 	/* allocate, copy and determine base address */
1577 	for (group = 0; group < ai->nr_groups; group++) {
1578 		struct pcpu_group_info *gi = &ai->groups[group];
1579 		unsigned int cpu = NR_CPUS;
1580 		void *ptr;
1581 
1582 		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1583 			cpu = gi->cpu_map[i];
1584 		BUG_ON(cpu == NR_CPUS);
1585 
1586 		/* allocate space for the whole group */
1587 		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1588 		if (!ptr) {
1589 			rc = -ENOMEM;
1590 			goto out_free_areas;
1591 		}
1592 		areas[group] = ptr;
1593 
1594 		base = min(ptr, base);
1595 
1596 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1597 			if (gi->cpu_map[i] == NR_CPUS) {
1598 				/* unused unit, free whole */
1599 				free_fn(ptr, ai->unit_size);
1600 				continue;
1601 			}
1602 			/* copy and return the unused part */
1603 			memcpy(ptr, __per_cpu_load, ai->static_size);
1604 			free_fn(ptr + size_sum, ai->unit_size - size_sum);
1605 		}
1606 	}
1607 
1608 	/* base address is now known, determine group base offsets */
1609 	max_distance = 0;
1610 	for (group = 0; group < ai->nr_groups; group++) {
1611 		ai->groups[group].base_offset = areas[group] - base;
1612 		max_distance = max_t(size_t, max_distance,
1613 				     ai->groups[group].base_offset);
1614 	}
1615 	max_distance += ai->unit_size;
1616 
1617 	/* warn if maximum distance is further than 75% of vmalloc space */
1618 	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1619 		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1620 			   "space 0x%lx\n",
1621 			   max_distance, VMALLOC_END - VMALLOC_START);
1622 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1623 		/* and fail if we have fallback */
1624 		rc = -EINVAL;
1625 		goto out_free;
1626 #endif
1627 	}
1628 
1629 	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1630 		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1631 		ai->dyn_size, ai->unit_size);
1632 
1633 	rc = pcpu_setup_first_chunk(ai, base);
1634 	goto out_free;
1635 
1636 out_free_areas:
1637 	for (group = 0; group < ai->nr_groups; group++)
1638 		free_fn(areas[group],
1639 			ai->groups[group].nr_units * ai->unit_size);
1640 out_free:
1641 	pcpu_free_alloc_info(ai);
1642 	if (areas)
1643 		free_bootmem(__pa(areas), areas_size);
1644 	return rc;
1645 }
1646 #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1647 	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */
1648 
1649 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1650 /**
1651  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1652  * @reserved_size: the size of reserved percpu area in bytes
1653  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1654  * @free_fn: function to free percpu page, always called with PAGE_SIZE
1655  * @populate_pte_fn: function to populate pte
1656  *
1657  * This is a helper to ease setting up page-remapped first percpu
1658  * chunk and can be called where pcpu_setup_first_chunk() is expected.
1659  *
1660  * This is the basic allocator.  Static percpu area is allocated
1661  * page-by-page into vmalloc area.
1662  *
1663  * RETURNS:
1664  * 0 on success, -errno on failure.
1665  */
1666 int __init pcpu_page_first_chunk(size_t reserved_size,
1667 				 pcpu_fc_alloc_fn_t alloc_fn,
1668 				 pcpu_fc_free_fn_t free_fn,
1669 				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1670 {
1671 	static struct vm_struct vm;
1672 	struct pcpu_alloc_info *ai;
1673 	char psize_str[16];
1674 	int unit_pages;
1675 	size_t pages_size;
1676 	struct page **pages;
1677 	int unit, i, j, rc;
1678 
1679 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1680 
1681 	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1682 	if (IS_ERR(ai))
1683 		return PTR_ERR(ai);
1684 	BUG_ON(ai->nr_groups != 1);
1685 	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1686 
1687 	unit_pages = ai->unit_size >> PAGE_SHIFT;
1688 
1689 	/* unaligned allocations can't be freed, round up to page size */
1690 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1691 			       sizeof(pages[0]));
1692 	pages = alloc_bootmem(pages_size);
1693 
1694 	/* allocate pages */
1695 	j = 0;
1696 	for (unit = 0; unit < num_possible_cpus(); unit++)
1697 		for (i = 0; i < unit_pages; i++) {
1698 			unsigned int cpu = ai->groups[0].cpu_map[unit];
1699 			void *ptr;
1700 
1701 			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1702 			if (!ptr) {
1703 				pr_warning("PERCPU: failed to allocate %s page "
1704 					   "for cpu%u\n", psize_str, cpu);
1705 				goto enomem;
1706 			}
1707 			pages[j++] = virt_to_page(ptr);
1708 		}
1709 
1710 	/* allocate vm area, map the pages and copy static data */
1711 	vm.flags = VM_ALLOC;
1712 	vm.size = num_possible_cpus() * ai->unit_size;
1713 	vm_area_register_early(&vm, PAGE_SIZE);
1714 
1715 	for (unit = 0; unit < num_possible_cpus(); unit++) {
1716 		unsigned long unit_addr =
1717 			(unsigned long)vm.addr + unit * ai->unit_size;
1718 
1719 		for (i = 0; i < unit_pages; i++)
1720 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1721 
1722 		/* pte already populated, the following shouldn't fail */
1723 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1724 				      unit_pages);
1725 		if (rc < 0)
1726 			panic("failed to map percpu area, err=%d\n", rc);
1727 
1728 		/*
1729 		 * FIXME: Archs with virtual cache should flush local
1730 		 * cache for the linear mapping here - something
1731 		 * equivalent to flush_cache_vmap() on the local cpu.
1732 		 * flush_cache_vmap() can't be used as most supporting
1733 		 * data structures are not set up yet.
1734 		 */
1735 
1736 		/* copy static data */
1737 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1738 	}
1739 
1740 	/* we're ready, commit */
1741 	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1742 		unit_pages, psize_str, vm.addr, ai->static_size,
1743 		ai->reserved_size, ai->dyn_size);
1744 
1745 	rc = pcpu_setup_first_chunk(ai, vm.addr);
1746 	goto out_free_ar;
1747 
1748 enomem:
1749 	while (--j >= 0)
1750 		free_fn(page_address(pages[j]), PAGE_SIZE);
1751 	rc = -ENOMEM;
1752 out_free_ar:
1753 	free_bootmem(__pa(pages), pages_size);
1754 	pcpu_free_alloc_info(ai);
1755 	return rc;
1756 }
1757 #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
1758 
1759 /*
1760  * Generic percpu area setup.
1761  *
1762  * The embedding helper is used because its behavior closely resembles
1763  * the original non-dynamic generic percpu area setup.  This is
1764  * important because many archs have addressing restrictions and might
1765  * fail if the percpu area is located far away from the previous
1766  * location.  As an added bonus, in non-NUMA cases, embedding is
1767  * generally a good idea TLB-wise because percpu area can piggy back
1768  * on the physical linear memory mapping which uses large page
1769  * mappings on applicable archs.
1770  */
1771 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1772 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1773 EXPORT_SYMBOL(__per_cpu_offset);
1774 
1775 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1776 				       size_t align)
1777 {
1778 	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1779 }
1780 
1781 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1782 {
1783 	free_bootmem(__pa(ptr), size);
1784 }
1785 
1786 void __init setup_per_cpu_areas(void)
1787 {
1788 	unsigned long delta;
1789 	unsigned int cpu;
1790 	int rc;
1791 
1792 	/*
1793 	 * Always reserve area for module percpu variables.  That's
1794 	 * what the legacy allocator did.
1795 	 */
1796 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1797 				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1798 				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1799 	if (rc < 0)
1800 		panic("Failed to initialize percpu areas.");
1801 
1802 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1803 	for_each_possible_cpu(cpu)
1804 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1805 }
1806 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1807 
1808 /*
1809  * First and reserved chunks are initialized with temporary allocation
1810  * map in initdata so that they can be used before slab is online.
1811  * This function is called after slab is brought up and replaces those
1812  * with properly allocated maps.
1813  */
1814 void __init percpu_init_late(void)
1815 {
1816 	struct pcpu_chunk *target_chunks[] =
1817 		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
1818 	struct pcpu_chunk *chunk;
1819 	unsigned long flags;
1820 	int i;
1821 
1822 	for (i = 0; (chunk = target_chunks[i]); i++) {
1823 		int *map;
1824 		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
1825 
1826 		BUILD_BUG_ON(size > PAGE_SIZE);
1827 
1828 		map = pcpu_mem_alloc(size);
1829 		BUG_ON(!map);
1830 
1831 		spin_lock_irqsave(&pcpu_lock, flags);
1832 		memcpy(map, chunk->map, size);
1833 		chunk->map = map;
1834 		spin_unlock_irqrestore(&pcpu_lock, flags);
1835 	}
1836 }
1837