1 /*
2  * linux/mm/percpu.c - percpu memory allocator
3  *
4  * Copyright (C) 2009		SUSE Linux Products GmbH
5  * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
6  *
7  * This file is released under the GPLv2.
8  *
9  * This is the percpu allocator which can handle both static and
10  * dynamic areas.  Percpu areas are allocated in chunks in the vmalloc
11  * area.  Each chunk consists of a boot-time determined number of
12  * units and the first chunk is used for static percpu variables in
13  * the kernel image (special boot time alloc/init handling is
14  * necessary as these areas need to be brought up before allocation
15  * services are running).  Units grow as necessary and all units grow
16  * or shrink in unison.  When a chunk is filled up, another chunk is
17  * allocated in the vmalloc area:
18  *
19  *  c0                           c1                         c2
20  *  -------------------          -------------------        ------------
21  * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
22  *  -------------------  ......  -------------------  ....  ------------
23  *
24  * Allocation is done in offset-size areas of a single unit's space.
25  * For example, an area of 512 bytes at offset 6k in c1 occupies 512
26  * bytes at 6k of c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units
27  * correspond directly to cpus.  On NUMA, the mapping can be non-linear and sparse.
28  * Percpu access can be done by configuring percpu base registers
29  * according to cpu to unit mapping and pcpu_unit_size.
30  *
31  * There are usually many small percpu allocations, many of them as
32  * small as 4 bytes.  The allocator organizes chunks into lists
33  * according to free size and tries to allocate from the fullest one.
34  * Each chunk keeps a maximum contiguous area size hint which is
35  * guaranteed to be equal to or larger than the maximum contiguous
36  * area in the chunk.  This helps the allocator avoid iterating over
37  * the chunk maps unnecessarily.
38  *
39  * Allocation state in each chunk is kept using an array of integers
40  * in chunk->map.  A positive value in the map represents a free
41  * region and a negative value an allocated one.  Allocation inside a
42  * chunk is done by scanning this map sequentially and serving the
43  * first matching entry.  This is mostly copied from the percpu_modalloc()
44  * allocator.  The chunk containing an address can be determined from
45  * the index field in the page struct, which holds a pointer to the chunk.
46  *
47  * To use this allocator, arch code should do the following:
48  *
49  * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
50  *
51  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
52  *   regular address to percpu pointer and back if they need to be
53  *   different from the default
54  *
55  * - use pcpu_setup_first_chunk() during percpu area initialization to
56  *   setup the first chunk containing the kernel static percpu area
57  */
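
/*
 * A brief worked example of the map convention described above
 * (sizes are illustrative).  A fresh 64k chunk starts out as a single
 * free region:
 *
 *	map: {  65536 }			free_size = 65536
 *
 * After a 4k allocation at offset 0 and a 256 byte allocation right
 * after it, the map becomes:
 *
 *	map: { -4096, -256, 61184 }	free_size = 61184
 */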
58 
59 #include <linux/bitmap.h>
60 #include <linux/bootmem.h>
61 #include <linux/err.h>
62 #include <linux/list.h>
63 #include <linux/log2.h>
64 #include <linux/mm.h>
65 #include <linux/module.h>
66 #include <linux/mutex.h>
67 #include <linux/percpu.h>
68 #include <linux/pfn.h>
69 #include <linux/slab.h>
70 #include <linux/spinlock.h>
71 #include <linux/vmalloc.h>
72 #include <linux/workqueue.h>
73 
74 #include <asm/cacheflush.h>
75 #include <asm/sections.h>
76 #include <asm/tlbflush.h>
77 
78 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 share the same slot */
79 #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
80 
81 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
82 #ifndef __addr_to_pcpu_ptr
83 #define __addr_to_pcpu_ptr(addr)					\
84 	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
85 		 + (unsigned long)__per_cpu_start)
86 #endif
87 #ifndef __pcpu_ptr_to_addr
88 #define __pcpu_ptr_to_addr(ptr)						\
89 	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
90 		 - (unsigned long)__per_cpu_start)
91 #endif
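
/*
 * A minimal sketch of the default translation above, assuming a
 * hypothetical layout where __per_cpu_start is 0xc0400000 and
 * pcpu_base_addr is 0xf0000000:
 *
 *	addr 0xf0001000      ->  pcpu ptr 0xc0401000	(__addr_to_pcpu_ptr)
 *	pcpu ptr 0xc0401000  ->  addr 0xf0001000	(__pcpu_ptr_to_addr)
 *
 * i.e. a percpu pointer is simply the unit0 address rebased onto the
 * kernel static percpu section, so the usual per_cpu accessors keep
 * working on dynamically allocated areas.
 */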
92 
93 struct pcpu_chunk {
94 	struct list_head	list;		/* linked to pcpu_slot lists */
95 	int			free_size;	/* free bytes in the chunk */
96 	int			contig_hint;	/* max contiguous size hint */
97 	void			*base_addr;	/* base address of this chunk */
98 	int			map_used;	/* # of map entries used */
99 	int			map_alloc;	/* # of map entries allocated */
100 	int			*map;		/* allocation map */
101 	struct vm_struct	**vms;		/* mapped vmalloc regions */
102 	bool			immutable;	/* no [de]population allowed */
103 	unsigned long		populated[];	/* populated bitmap */
104 };
105 
106 static int pcpu_unit_pages __read_mostly;
107 static int pcpu_unit_size __read_mostly;
108 static int pcpu_nr_units __read_mostly;
109 static int pcpu_atom_size __read_mostly;
110 static int pcpu_nr_slots __read_mostly;
111 static size_t pcpu_chunk_struct_size __read_mostly;
112 
113 /* cpus with the lowest and highest unit numbers */
114 static unsigned int pcpu_first_unit_cpu __read_mostly;
115 static unsigned int pcpu_last_unit_cpu __read_mostly;
116 
117 /* the address of the first chunk which starts with the kernel static area */
118 void *pcpu_base_addr __read_mostly;
119 EXPORT_SYMBOL_GPL(pcpu_base_addr);
120 
121 static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
122 const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
123 
124 /* group information, used for vm allocation */
125 static int pcpu_nr_groups __read_mostly;
126 static const unsigned long *pcpu_group_offsets __read_mostly;
127 static const size_t *pcpu_group_sizes __read_mostly;
128 
129 /*
130  * The first chunk which always exists.  Note that unlike other
131  * chunks, this one can be allocated and mapped in several different
132  * ways and thus often doesn't live in the vmalloc area.
133  */
134 static struct pcpu_chunk *pcpu_first_chunk;
135 
136 /*
137  * Optional reserved chunk.  This chunk reserves part of the first
138  * chunk and serves it for reserved allocations.  The offset up to
139  * which the first chunk is reserved is kept in
140  * pcpu_reserved_chunk_limit.  When the reserved area doesn't exist,
141  * the following variables contain NULL and 0 respectively.
142  */
143 static struct pcpu_chunk *pcpu_reserved_chunk;
144 static int pcpu_reserved_chunk_limit;
145 
146 /*
147  * Synchronization rules.
148  *
149  * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
150  * protects allocation/reclaim paths, chunks, populated bitmap and
151  * vmalloc mapping.  The latter is a spinlock and protects the index
152  * data structures - chunk slots, chunks and area maps in chunks.
153  *
154  * During allocation, pcpu_alloc_mutex is kept locked all the time and
155  * pcpu_lock is grabbed and released as necessary.  All actual memory
156  * allocations are done using GFP_KERNEL with pcpu_lock released.
157  *
158  * Free path accesses and alters only the index data structures, so it
159  * can be safely called from atomic context.  When memory needs to be
160  * returned to the system, free path schedules reclaim_work which
161  * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
162  * reclaimed, releases both locks and frees the chunks.  Note that it's
163  * necessary to grab both locks to remove a chunk from circulation as
164  * allocation path might be referencing the chunk with only
165  * pcpu_alloc_mutex locked.
166  */
167 static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
168 static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
169 
170 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
171 
172 /* reclaim work to release fully free chunks, scheduled from free path */
173 static void pcpu_reclaim(struct work_struct *work);
174 static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
175 
176 static int __pcpu_size_to_slot(int size)
177 {
178 	int highbit = fls(size);	/* size is in bytes */
179 	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
180 }
181 
182 static int pcpu_size_to_slot(int size)
183 {
184 	if (size == pcpu_unit_size)
185 		return pcpu_nr_slots - 1;
186 	return __pcpu_size_to_slot(size);
187 }
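
/*
 * For example, with PCPU_SLOT_BASE_SHIFT == 5:
 *
 *	__pcpu_size_to_slot(4)    == 1		(fls(4)    == 3)
 *	__pcpu_size_to_slot(64)   == 4		(fls(64)   == 7)
 *	__pcpu_size_to_slot(1024) == 8		(fls(1024) == 11)
 *
 * and pcpu_size_to_slot(pcpu_unit_size) always maps to the last slot,
 * which is where fully free chunks end up.
 */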
188 
189 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
190 {
191 	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
192 		return 0;
193 
194 	return pcpu_size_to_slot(chunk->free_size);
195 }
196 
197 static int pcpu_page_idx(unsigned int cpu, int page_idx)
198 {
199 	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
200 }
201 
202 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
203 				     unsigned int cpu, int page_idx)
204 {
205 	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
206 		(page_idx << PAGE_SHIFT);
207 }
208 
209 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
210 				    unsigned int cpu, int page_idx)
211 {
212 	/* must not be used on pre-mapped chunk */
213 	WARN_ON(chunk->immutable);
214 
215 	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
216 }
217 
218 /* set the pointer to a chunk in a page struct */
219 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
220 {
221 	page->index = (unsigned long)pcpu;
222 }
223 
224 /* obtain pointer to a chunk from a page struct */
225 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
226 {
227 	return (struct pcpu_chunk *)page->index;
228 }
229 
230 static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
231 {
232 	*rs = find_next_zero_bit(chunk->populated, end, *rs);
233 	*re = find_next_bit(chunk->populated, end, *rs + 1);
234 }
235 
236 static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
237 {
238 	*rs = find_next_bit(chunk->populated, end, *rs);
239 	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
240 }
241 
242 /*
243  * (Un)populated page region iterators.  Iterate over (un)populated
244  * page regions between @start and @end in @chunk.  @rs and @re should
245  * be integer variables and will be set to start and end page index of
246  * the current region.
247  */
248 #define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
249 	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
250 	     (rs) < (re);						    \
251 	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
252 
253 #define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
254 	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
255 	     (rs) < (re);						    \
256 	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
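
/*
 * As a usage sketch (hypothetical caller), walking the unpopulated
 * runs in the first four pages of a chunk could look like:
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, 4)
 *		pr_debug("pages [%d, %d) not populated\n", rs, re);
 */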
257 
258 /**
259  * pcpu_mem_alloc - allocate memory
260  * @size: bytes to allocate
261  *
262  * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
263  * kzalloc() is used; otherwise, vmalloc() is used.  The returned
264  * memory is always zeroed.
265  *
266  * CONTEXT:
267  * Does GFP_KERNEL allocation.
268  *
269  * RETURNS:
270  * Pointer to the allocated area on success, NULL on failure.
271  */
272 static void *pcpu_mem_alloc(size_t size)
273 {
274 	if (size <= PAGE_SIZE)
275 		return kzalloc(size, GFP_KERNEL);
276 	else {
277 		void *ptr = vmalloc(size);
278 		if (ptr)
279 			memset(ptr, 0, size);
280 		return ptr;
281 	}
282 }
283 
284 /**
285  * pcpu_mem_free - free memory
286  * @ptr: memory to free
287  * @size: size of the area
288  *
289  * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
290  */
291 static void pcpu_mem_free(void *ptr, size_t size)
292 {
293 	if (size <= PAGE_SIZE)
294 		kfree(ptr);
295 	else
296 		vfree(ptr);
297 }
298 
299 /**
300  * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
301  * @chunk: chunk of interest
302  * @oslot: the previous slot it was on
303  *
304  * This function is called after an allocation or free changed @chunk.
305  * New slot according to the changed state is determined and @chunk is
306  * moved to the slot.  Note that the reserved chunk is never put on
307  * chunk slots.
308  *
309  * CONTEXT:
310  * pcpu_lock.
311  */
312 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
313 {
314 	int nslot = pcpu_chunk_slot(chunk);
315 
316 	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
317 		if (oslot < nslot)
318 			list_move(&chunk->list, &pcpu_slot[nslot]);
319 		else
320 			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
321 	}
322 }
323 
324 /**
325  * pcpu_chunk_addr_search - determine chunk containing specified address
326  * @addr: address for which the chunk needs to be determined.
327  *
328  * RETURNS:
329  * The address of the found chunk.
330  */
331 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
332 {
333 	void *first_start = pcpu_first_chunk->base_addr;
334 
335 	/* is it in the first chunk? */
336 	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
337 		/* is it in the reserved area? */
338 		if (addr < first_start + pcpu_reserved_chunk_limit)
339 			return pcpu_reserved_chunk;
340 		return pcpu_first_chunk;
341 	}
342 
343 	/*
344 	 * The address is relative to unit0 which might be unused and
345 	 * thus unmapped.  Offset the address to the unit space of the
346 	 * current processor before looking it up in the vmalloc
347 	 * space.  Note that any possible cpu id can be used here, so
348 	 * there's no need to worry about preemption or cpu hotplug.
349 	 */
350 	addr += pcpu_unit_offsets[raw_smp_processor_id()];
351 	return pcpu_get_page_chunk(vmalloc_to_page(addr));
352 }
353 
354 /**
355  * pcpu_extend_area_map - extend area map for allocation
356  * @chunk: target chunk
357  *
358  * Extend area map of @chunk so that it can accommodate an allocation.
359  * A single allocation can split an area into three areas, so this
360  * function makes sure that @chunk->map has at least two extra slots.
361  *
362  * CONTEXT:
363  * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
364  * if area map is extended.
365  *
366  * RETURNS:
367  * 0 if noop, 1 if successfully extended, -errno on failure.
368  */
369 static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
370 {
371 	int new_alloc;
372 	int *new;
373 	size_t size;
374 
375 	/* has enough? */
376 	if (chunk->map_alloc >= chunk->map_used + 2)
377 		return 0;
378 
379 	spin_unlock_irq(&pcpu_lock);
380 
381 	new_alloc = PCPU_DFL_MAP_ALLOC;
382 	while (new_alloc < chunk->map_used + 2)
383 		new_alloc *= 2;
384 
385 	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
386 	if (!new) {
387 		spin_lock_irq(&pcpu_lock);
388 		return -ENOMEM;
389 	}
390 
391 	/*
392 	 * Acquire pcpu_lock and switch to new area map.  Only free
393  * could have happened in between, so map_used couldn't have
394 	 * grown.
395 	 */
396 	spin_lock_irq(&pcpu_lock);
397 	BUG_ON(new_alloc < chunk->map_used + 2);
398 
399 	size = chunk->map_alloc * sizeof(chunk->map[0]);
400 	memcpy(new, chunk->map, size);
401 
402 	/*
403 	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
404 	 * one of the first chunks and still using static map.
405 	 */
406 	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
407 		pcpu_mem_free(chunk->map, size);
408 
409 	chunk->map_alloc = new_alloc;
410 	chunk->map = new;
411 	return 0;
412 }
413 
414 /**
415  * pcpu_split_block - split a map block
416  * @chunk: chunk of interest
417  * @i: index of map block to split
418  * @head: head size in bytes (can be 0)
419  * @tail: tail size in bytes (can be 0)
420  *
421  * Split the @i'th map block into two or three blocks.  If @head is
422  * non-zero, a block of @head bytes is inserted before block @i,
423  * moving it to @i+1 and reducing its size by @head bytes.
424  *
425  * If @tail is non-zero, the target block, which can be @i or @i+1
426  * depending on @head, is reduced by @tail bytes and a block of @tail
427  * bytes is inserted after the target block.
428  *
429  * @chunk->map must have enough free slots to accommodate the split.
430  *
431  * CONTEXT:
432  * pcpu_lock.
433  */
434 static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
435 			     int head, int tail)
436 {
437 	int nr_extra = !!head + !!tail;
438 
439 	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
440 
441 	/* insert new subblocks */
442 	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
443 		sizeof(chunk->map[0]) * (chunk->map_used - i));
444 	chunk->map_used += nr_extra;
445 
446 	if (head) {
447 		chunk->map[i + 1] = chunk->map[i] - head;
448 		chunk->map[i++] = head;
449 	}
450 	if (tail) {
451 		chunk->map[i++] -= tail;
452 		chunk->map[i] = tail;
453 	}
454 }
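
/*
 * A worked example (sizes are illustrative): splitting a 1024 byte
 * free block at index @i with head == 128 and tail == 256 turns
 *
 *	..., 1024, ...		into	..., 128, 640, 256, ...
 *
 * where the middle 640 byte block (1024 - 128 - 256) is the one the
 * caller will then mark allocated.
 */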
455 
456 /**
457  * pcpu_alloc_area - allocate area from a pcpu_chunk
458  * @chunk: chunk of interest
459  * @size: wanted size in bytes
460  * @align: wanted align
461  *
462  * Try to allocate @size bytes area aligned at @align from @chunk.
463  * Note that this function only allocates the offset.  It doesn't
464  * populate or map the area.
465  *
466  * @chunk->map must have at least two free slots.
467  *
468  * CONTEXT:
469  * pcpu_lock.
470  *
471  * RETURNS:
472  * Allocated offset in @chunk on success, -1 if no matching area is
473  * found.
474  */
475 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
476 {
477 	int oslot = pcpu_chunk_slot(chunk);
478 	int max_contig = 0;
479 	int i, off;
480 
481 	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
482 		bool is_last = i + 1 == chunk->map_used;
483 		int head, tail;
484 
485 		/* extra for alignment requirement */
486 		head = ALIGN(off, align) - off;
487 		BUG_ON(i == 0 && head != 0);
488 
489 		if (chunk->map[i] < 0)
490 			continue;
491 		if (chunk->map[i] < head + size) {
492 			max_contig = max(chunk->map[i], max_contig);
493 			continue;
494 		}
495 
496 		/*
497 		 * If head is small or the previous block is free,
498 		 * merge them.  Note that 'small' is defined as smaller
499 		 * than sizeof(int), which is very small but isn't too
500 		 * uncommon for percpu allocations.
501 		 */
502 		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
503 			if (chunk->map[i - 1] > 0)
504 				chunk->map[i - 1] += head;
505 			else {
506 				chunk->map[i - 1] -= head;
507 				chunk->free_size -= head;
508 			}
509 			chunk->map[i] -= head;
510 			off += head;
511 			head = 0;
512 		}
513 
514 		/* if tail is small, just keep it around */
515 		tail = chunk->map[i] - head - size;
516 		if (tail < sizeof(int))
517 			tail = 0;
518 
519 		/* split if warranted */
520 		if (head || tail) {
521 			pcpu_split_block(chunk, i, head, tail);
522 			if (head) {
523 				i++;
524 				off += head;
525 				max_contig = max(chunk->map[i - 1], max_contig);
526 			}
527 			if (tail)
528 				max_contig = max(chunk->map[i + 1], max_contig);
529 		}
530 
531 		/* update hint and mark allocated */
532 		if (is_last)
533 			chunk->contig_hint = max_contig; /* fully scanned */
534 		else
535 			chunk->contig_hint = max(chunk->contig_hint,
536 						 max_contig);
537 
538 		chunk->free_size -= chunk->map[i];
539 		chunk->map[i] = -chunk->map[i];
540 
541 		pcpu_chunk_relocate(chunk, oslot);
542 		return off;
543 	}
544 
545 	chunk->contig_hint = max_contig;	/* fully scanned */
546 	pcpu_chunk_relocate(chunk, oslot);
547 
548 	/* tell the upper layer that this chunk has no matching area */
549 	return -1;
550 }
551 
552 /**
553  * pcpu_free_area - free area to a pcpu_chunk
554  * @chunk: chunk of interest
555  * @freeme: offset of area to free
556  *
557  * Free the area starting at @freeme in @chunk.  Note that this function
558  * only modifies the allocation map.  It doesn't depopulate or unmap
559  * the area.
560  *
561  * CONTEXT:
562  * pcpu_lock.
563  */
564 static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
565 {
566 	int oslot = pcpu_chunk_slot(chunk);
567 	int i, off;
568 
569 	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
570 		if (off == freeme)
571 			break;
572 	BUG_ON(off != freeme);
573 	BUG_ON(chunk->map[i] > 0);
574 
575 	chunk->map[i] = -chunk->map[i];
576 	chunk->free_size += chunk->map[i];
577 
578 	/* merge with previous? */
579 	if (i > 0 && chunk->map[i - 1] >= 0) {
580 		chunk->map[i - 1] += chunk->map[i];
581 		chunk->map_used--;
582 		memmove(&chunk->map[i], &chunk->map[i + 1],
583 			(chunk->map_used - i) * sizeof(chunk->map[0]));
584 		i--;
585 	}
586 	/* merge with next? */
587 	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
588 		chunk->map[i] += chunk->map[i + 1];
589 		chunk->map_used--;
590 		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
591 			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
592 	}
593 
594 	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
595 	pcpu_chunk_relocate(chunk, oslot);
596 }
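
/*
 * For example (illustrative sizes), freeing the middle area of
 *
 *	{ -4096, -256, 1024 }
 *
 * first flips -256 back to 256 and then merges it with the following
 * free block, leaving
 *
 *	{ -4096, 1280 }
 *
 * The preceding block stays untouched as it is still allocated.
 */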
597 
598 /**
599  * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
600  * @chunk: chunk of interest
601  * @bitmapp: output parameter for bitmap
602  * @may_alloc: may allocate the array
603  *
604  * Returns pointer to array of pointers to struct page and bitmap,
605  * both of which can be indexed with pcpu_page_idx().  The returned
606  * array is cleared to zero and *@bitmapp is copied from
607  * @chunk->populated.  Note that there is only one array and bitmap
608  * and access exclusion is the caller's responsibility.
609  *
610  * CONTEXT:
611  * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
612  * Otherwise, don't care.
613  *
614  * RETURNS:
615  * Pointer to temp pages array on success, NULL on failure.
616  */
617 static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
618 					       unsigned long **bitmapp,
619 					       bool may_alloc)
620 {
621 	static struct page **pages;
622 	static unsigned long *bitmap;
623 	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
624 	size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
625 			     sizeof(unsigned long);
626 
627 	if (!pages || !bitmap) {
628 		if (may_alloc && !pages)
629 			pages = pcpu_mem_alloc(pages_size);
630 		if (may_alloc && !bitmap)
631 			bitmap = pcpu_mem_alloc(bitmap_size);
632 		if (!pages || !bitmap)
633 			return NULL;
634 	}
635 
636 	memset(pages, 0, pages_size);
637 	bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
638 
639 	*bitmapp = bitmap;
640 	return pages;
641 }
642 
643 /**
644  * pcpu_free_pages - free pages which were allocated for @chunk
645  * @chunk: chunk pages were allocated for
646  * @pages: array of pages to be freed, indexed by pcpu_page_idx()
647  * @populated: populated bitmap
648  * @page_start: page index of the first page to be freed
649  * @page_end: page index of the last page to be freed + 1
650  *
651  * Free pages [@page_start, @page_end) in @pages for all units.
652  * The pages were allocated for @chunk.
653  */
654 static void pcpu_free_pages(struct pcpu_chunk *chunk,
655 			    struct page **pages, unsigned long *populated,
656 			    int page_start, int page_end)
657 {
658 	unsigned int cpu;
659 	int i;
660 
661 	for_each_possible_cpu(cpu) {
662 		for (i = page_start; i < page_end; i++) {
663 			struct page *page = pages[pcpu_page_idx(cpu, i)];
664 
665 			if (page)
666 				__free_page(page);
667 		}
668 	}
669 }
670 
671 /**
672  * pcpu_alloc_pages - allocates pages for @chunk
673  * @chunk: target chunk
674  * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
675  * @populated: populated bitmap
676  * @page_start: page index of the first page to be allocated
677  * @page_end: page index of the last page to be allocated + 1
678  *
679  * Allocate pages [@page_start,@page_end) into @pages for all units.
680  * The allocation is for @chunk.  Percpu core doesn't care about the
681  * content of @pages and will pass it verbatim to pcpu_map_pages().
682  */
683 static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
684 			    struct page **pages, unsigned long *populated,
685 			    int page_start, int page_end)
686 {
687 	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
688 	unsigned int cpu;
689 	int i;
690 
691 	for_each_possible_cpu(cpu) {
692 		for (i = page_start; i < page_end; i++) {
693 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
694 
695 			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
696 			if (!*pagep) {
697 				pcpu_free_pages(chunk, pages, populated,
698 						page_start, page_end);
699 				return -ENOMEM;
700 			}
701 		}
702 	}
703 	return 0;
704 }
705 
706 /**
707  * pcpu_pre_unmap_flush - flush cache prior to unmapping
708  * @chunk: chunk the regions to be flushed belongs to
709  * @page_start: page index of the first page to be flushed
710  * @page_end: page index of the last page to be flushed + 1
711  *
712  * Pages in [@page_start,@page_end) of @chunk are about to be
713  * unmapped.  Flush cache.  As each flushing trial can be very
714  * expensive, issue flush on the whole region at once rather than
715  * doing it for each cpu.  This could be overkill but is more
716  * scalable.
717  */
718 static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
719 				 int page_start, int page_end)
720 {
721 	flush_cache_vunmap(
722 		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
723 		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
724 }
725 
726 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
727 {
728 	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
729 }
730 
731 /**
732  * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
733  * @chunk: chunk of interest
734  * @pages: pages array which can be used to pass information to free
735  * @populated: populated bitmap
736  * @page_start: page index of the first page to unmap
737  * @page_end: page index of the last page to unmap + 1
738  *
739  * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
740  * Corresponding elements in @pages were cleared by the caller and can
741  * be used to carry information to pcpu_free_pages() which will be
742  * called after all unmaps are finished.  The caller should call
743  * proper pre/post flush functions.
744  */
745 static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
746 			     struct page **pages, unsigned long *populated,
747 			     int page_start, int page_end)
748 {
749 	unsigned int cpu;
750 	int i;
751 
752 	for_each_possible_cpu(cpu) {
753 		for (i = page_start; i < page_end; i++) {
754 			struct page *page;
755 
756 			page = pcpu_chunk_page(chunk, cpu, i);
757 			WARN_ON(!page);
758 			pages[pcpu_page_idx(cpu, i)] = page;
759 		}
760 		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
761 				   page_end - page_start);
762 	}
763 
764 	for (i = page_start; i < page_end; i++)
765 		__clear_bit(i, populated);
766 }
767 
768 /**
769  * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
770  * @chunk: pcpu_chunk the regions to be flushed belong to
771  * @page_start: page index of the first page to be flushed
772  * @page_end: page index of the last page to be flushed + 1
773  *
774  * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
775  * TLB for the regions.  This can be skipped if the area is to be
776  * returned to vmalloc as vmalloc will handle TLB flushing lazily.
777  *
778  * As with pcpu_pre_unmap_flush(), TLB flushing is also done at once
779  * for the whole region.
780  */
781 static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
782 				      int page_start, int page_end)
783 {
784 	flush_tlb_kernel_range(
785 		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
786 		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
787 }
788 
789 static int __pcpu_map_pages(unsigned long addr, struct page **pages,
790 			    int nr_pages)
791 {
792 	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
793 					PAGE_KERNEL, pages);
794 }
795 
796 /**
797  * pcpu_map_pages - map pages into a pcpu_chunk
798  * @chunk: chunk of interest
799  * @pages: pages array containing pages to be mapped
800  * @populated: populated bitmap
801  * @page_start: page index of the first page to map
802  * @page_end: page index of the last page to map + 1
803  *
804  * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
805  * caller is responsible for calling pcpu_post_map_flush() after all
806  * mappings are complete.
807  *
808  * This function is responsible for setting corresponding bits in
809  * @chunk->populated bitmap and whatever is necessary for reverse
810  * lookup (addr -> chunk).
811  */
812 static int pcpu_map_pages(struct pcpu_chunk *chunk,
813 			  struct page **pages, unsigned long *populated,
814 			  int page_start, int page_end)
815 {
816 	unsigned int cpu, tcpu;
817 	int i, err;
818 
819 	for_each_possible_cpu(cpu) {
820 		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
821 				       &pages[pcpu_page_idx(cpu, page_start)],
822 				       page_end - page_start);
823 		if (err < 0)
824 			goto err;
825 	}
826 
827 	/* mapping successful, link chunk and mark populated */
828 	for (i = page_start; i < page_end; i++) {
829 		for_each_possible_cpu(cpu)
830 			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
831 					    chunk);
832 		__set_bit(i, populated);
833 	}
834 
835 	return 0;
836 
837 err:
838 	for_each_possible_cpu(tcpu) {
839 		if (tcpu == cpu)
840 			break;
841 		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
842 				   page_end - page_start);
843 	}
844 	return err;
845 }
846 
847 /**
848  * pcpu_post_map_flush - flush cache after mapping
849  * @chunk: pcpu_chunk the regions to be flushed belong to
850  * @page_start: page index of the first page to be flushed
851  * @page_end: page index of the last page to be flushed + 1
852  *
853  * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
854  * cache.
855  *
856  * As with pcpu_pre_unmap_flush(), cache flushing is also done at once
857  * for the whole region.
858  */
859 static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
860 				int page_start, int page_end)
861 {
862 	flush_cache_vmap(
863 		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
864 		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
865 }
866 
867 /**
868  * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
869  * @chunk: chunk to depopulate
870  * @off: offset to the area to depopulate
871  * @size: size of the area to depopulate in bytes
872  *
873  * For each cpu, depopulate and unmap pages [@page_start,@page_end)
874  * from @chunk.  The cache is flushed before unmapping; the TLB flush
875  * is left to vmalloc which handles it lazily.
876  *
877  *
878  * CONTEXT:
879  * pcpu_alloc_mutex.
880  */
881 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
882 {
883 	int page_start = PFN_DOWN(off);
884 	int page_end = PFN_UP(off + size);
885 	struct page **pages;
886 	unsigned long *populated;
887 	int rs, re;
888 
889 	/* quick path, check whether it's empty already */
890 	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
891 		if (rs == page_start && re == page_end)
892 			return;
893 		break;
894 	}
895 
896 	/* immutable chunks can't be depopulated */
897 	WARN_ON(chunk->immutable);
898 
899 	/*
900 	 * If control reaches here, there must have been at least one
901 	 * successful population attempt so the temp pages array must
902 	 * be available now.
903 	 */
904 	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
905 	BUG_ON(!pages);
906 
907 	/* unmap and free */
908 	pcpu_pre_unmap_flush(chunk, page_start, page_end);
909 
910 	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
911 		pcpu_unmap_pages(chunk, pages, populated, rs, re);
912 
913 	/* no need to flush tlb, vmalloc will handle it lazily */
914 
915 	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
916 		pcpu_free_pages(chunk, pages, populated, rs, re);
917 
918 	/* commit new bitmap */
919 	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
920 }
921 
922 /**
923  * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
924  * @chunk: chunk of interest
925  * @off: offset to the area to populate
926  * @size: size of the area to populate in bytes
927  *
928  * For each cpu, populate and map pages [@page_start,@page_end) into
929  * @chunk.  The area is cleared on return.
930  *
931  * CONTEXT:
932  * pcpu_alloc_mutex, does GFP_KERNEL allocation.
933  */
934 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
935 {
936 	int page_start = PFN_DOWN(off);
937 	int page_end = PFN_UP(off + size);
938 	int free_end = page_start, unmap_end = page_start;
939 	struct page **pages;
940 	unsigned long *populated;
941 	unsigned int cpu;
942 	int rs, re, rc;
943 
944 	/* quick path, check whether all pages are already there */
945 	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
946 		if (rs == page_start && re == page_end)
947 			goto clear;
948 		break;
949 	}
950 
951 	/* need to allocate and map pages, this chunk can't be immutable */
952 	WARN_ON(chunk->immutable);
953 
954 	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
955 	if (!pages)
956 		return -ENOMEM;
957 
958 	/* alloc and map */
959 	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
960 		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
961 		if (rc)
962 			goto err_free;
963 		free_end = re;
964 	}
965 
966 	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
967 		rc = pcpu_map_pages(chunk, pages, populated, rs, re);
968 		if (rc)
969 			goto err_unmap;
970 		unmap_end = re;
971 	}
972 	pcpu_post_map_flush(chunk, page_start, page_end);
973 
974 	/* commit new bitmap */
975 	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
976 clear:
977 	for_each_possible_cpu(cpu)
978 		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
979 	return 0;
980 
981 err_unmap:
982 	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
983 	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
984 		pcpu_unmap_pages(chunk, pages, populated, rs, re);
985 	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
986 err_free:
987 	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
988 		pcpu_free_pages(chunk, pages, populated, rs, re);
989 	return rc;
990 }
991 
992 static void free_pcpu_chunk(struct pcpu_chunk *chunk)
993 {
994 	if (!chunk)
995 		return;
996 	if (chunk->vms)
997 		pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
998 	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
999 	kfree(chunk);
1000 }
1001 
1002 static struct pcpu_chunk *alloc_pcpu_chunk(void)
1003 {
1004 	struct pcpu_chunk *chunk;
1005 
1006 	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
1007 	if (!chunk)
1008 		return NULL;
1009 
1010 	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}
1011 	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
1012 	chunk->map[chunk->map_used++] = pcpu_unit_size;
1013 
1014 	chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
1015 				       pcpu_nr_groups, pcpu_atom_size,
1016 				       GFP_KERNEL);
1017 	if (!chunk->vms) {
1018 		free_pcpu_chunk(chunk);
1019 		return NULL;
1020 	}
1021 
1022 	INIT_LIST_HEAD(&chunk->list);
1023 	chunk->free_size = pcpu_unit_size;
1024 	chunk->contig_hint = pcpu_unit_size;
1025 	chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];
1026 
1027 	return chunk;
1028 }
1029 
1030 /**
1031  * pcpu_alloc - the percpu allocator
1032  * @size: size of area to allocate in bytes
1033  * @align: alignment of area (max PAGE_SIZE)
1034  * @reserved: allocate from the reserved chunk if available
1035  *
1036  * Allocate percpu area of @size bytes aligned at @align.
1037  *
1038  * CONTEXT:
1039  * Does GFP_KERNEL allocation.
1040  *
1041  * RETURNS:
1042  * Percpu pointer to the allocated area on success, NULL on failure.
1043  */
1044 static void *pcpu_alloc(size_t size, size_t align, bool reserved)
1045 {
1046 	static int warn_limit = 10;
1047 	struct pcpu_chunk *chunk;
1048 	const char *err;
1049 	int slot, off;
1050 
1051 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
1052 		WARN(true, "illegal size (%zu) or align (%zu) for "
1053 		     "percpu allocation\n", size, align);
1054 		return NULL;
1055 	}
1056 
1057 	mutex_lock(&pcpu_alloc_mutex);
1058 	spin_lock_irq(&pcpu_lock);
1059 
1060 	/* serve reserved allocations from the reserved chunk if available */
1061 	if (reserved && pcpu_reserved_chunk) {
1062 		chunk = pcpu_reserved_chunk;
1063 		if (size > chunk->contig_hint ||
1064 		    pcpu_extend_area_map(chunk) < 0) {
1065 			err = "failed to extend area map of reserved chunk";
1066 			goto fail_unlock;
1067 		}
1068 		off = pcpu_alloc_area(chunk, size, align);
1069 		if (off >= 0)
1070 			goto area_found;
1071 		err = "alloc from reserved chunk failed";
1072 		goto fail_unlock;
1073 	}
1074 
1075 restart:
1076 	/* search through normal chunks */
1077 	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1078 		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1079 			if (size > chunk->contig_hint)
1080 				continue;
1081 
1082 			switch (pcpu_extend_area_map(chunk)) {
1083 			case 0:
1084 				break;
1085 			case 1:
1086 				goto restart;	/* pcpu_lock dropped, restart */
1087 			default:
1088 				err = "failed to extend area map";
1089 				goto fail_unlock;
1090 			}
1091 
1092 			off = pcpu_alloc_area(chunk, size, align);
1093 			if (off >= 0)
1094 				goto area_found;
1095 		}
1096 	}
1097 
1098 	/* hmmm... no space left, create a new chunk */
1099 	spin_unlock_irq(&pcpu_lock);
1100 
1101 	chunk = alloc_pcpu_chunk();
1102 	if (!chunk) {
1103 		err = "failed to allocate new chunk";
1104 		goto fail_unlock_mutex;
1105 	}
1106 
1107 	spin_lock_irq(&pcpu_lock);
1108 	pcpu_chunk_relocate(chunk, -1);
1109 	goto restart;
1110 
1111 area_found:
1112 	spin_unlock_irq(&pcpu_lock);
1113 
1114 	/* populate, map and clear the area */
1115 	if (pcpu_populate_chunk(chunk, off, size)) {
1116 		spin_lock_irq(&pcpu_lock);
1117 		pcpu_free_area(chunk, off);
1118 		err = "failed to populate";
1119 		goto fail_unlock;
1120 	}
1121 
1122 	mutex_unlock(&pcpu_alloc_mutex);
1123 
1124 	/* return address relative to base address */
1125 	return __addr_to_pcpu_ptr(chunk->base_addr + off);
1126 
1127 fail_unlock:
1128 	spin_unlock_irq(&pcpu_lock);
1129 fail_unlock_mutex:
1130 	mutex_unlock(&pcpu_alloc_mutex);
1131 	if (warn_limit) {
1132 		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
1133 			   "%s\n", size, align, err);
1134 		dump_stack();
1135 		if (!--warn_limit)
1136 			pr_info("PERCPU: limit reached, disable warning\n");
1137 	}
1138 	return NULL;
1139 }
1140 
1141 /**
1142  * __alloc_percpu - allocate dynamic percpu area
1143  * @size: size of area to allocate in bytes
1144  * @align: alignment of area (max PAGE_SIZE)
1145  *
1146  * Allocate percpu area of @size bytes aligned at @align.  Might
1147  * sleep.  Might trigger writeouts.
1148  *
1149  * CONTEXT:
1150  * Does GFP_KERNEL allocation.
1151  *
1152  * RETURNS:
1153  * Percpu pointer to the allocated area on success, NULL on failure.
1154  */
1155 void *__alloc_percpu(size_t size, size_t align)
1156 {
1157 	return pcpu_alloc(size, align, false);
1158 }
1159 EXPORT_SYMBOL_GPL(__alloc_percpu);
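
/*
 * A minimal usage sketch (the structure and variable names are made
 * up for illustration):
 *
 *	struct my_stats { unsigned long events; };
 *	struct my_stats *stats;
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	stats = __alloc_percpu(sizeof(*stats), __alignof__(*stats));
 *	if (!stats)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		sum += per_cpu_ptr(stats, cpu)->events;
 *	free_percpu(stats);
 *
 * The returned area is cleared, so the per-cpu counters start at
 * zero.  The alloc_percpu() macro is the usual type-sized wrapper
 * around this function.
 */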
1160 
1161 /**
1162  * __alloc_reserved_percpu - allocate reserved percpu area
1163  * @size: size of area to allocate in bytes
1164  * @align: alignment of area (max PAGE_SIZE)
1165  *
1166  * Allocate percpu area of @size bytes aligned at @align from reserved
1167  * percpu area if arch has set it up; otherwise, allocation is served
1168  * from the same dynamic area.  Might sleep.  Might trigger writeouts.
1169  *
1170  * CONTEXT:
1171  * Does GFP_KERNEL allocation.
1172  *
1173  * RETURNS:
1174  * Percpu pointer to the allocated area on success, NULL on failure.
1175  */
1176 void *__alloc_reserved_percpu(size_t size, size_t align)
1177 {
1178 	return pcpu_alloc(size, align, true);
1179 }
1180 
1181 /**
1182  * pcpu_reclaim - reclaim fully free chunks, workqueue function
1183  * @work: unused
1184  *
1185  * Reclaim all fully free chunks except for the first one.
1186  *
1187  * CONTEXT:
1188  * workqueue context.
1189  */
1190 static void pcpu_reclaim(struct work_struct *work)
1191 {
1192 	LIST_HEAD(todo);
1193 	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
1194 	struct pcpu_chunk *chunk, *next;
1195 
1196 	mutex_lock(&pcpu_alloc_mutex);
1197 	spin_lock_irq(&pcpu_lock);
1198 
1199 	list_for_each_entry_safe(chunk, next, head, list) {
1200 		WARN_ON(chunk->immutable);
1201 
1202 		/* spare the first one */
1203 		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
1204 			continue;
1205 
1206 		list_move(&chunk->list, &todo);
1207 	}
1208 
1209 	spin_unlock_irq(&pcpu_lock);
1210 
1211 	list_for_each_entry_safe(chunk, next, &todo, list) {
1212 		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
1213 		free_pcpu_chunk(chunk);
1214 	}
1215 
1216 	mutex_unlock(&pcpu_alloc_mutex);
1217 }
1218 
1219 /**
1220  * free_percpu - free percpu area
1221  * @ptr: pointer to area to free
1222  *
1223  * Free percpu area @ptr.
1224  *
1225  * CONTEXT:
1226  * Can be called from atomic context.
1227  */
1228 void free_percpu(void *ptr)
1229 {
1230 	void *addr = __pcpu_ptr_to_addr(ptr);
1231 	struct pcpu_chunk *chunk;
1232 	unsigned long flags;
1233 	int off;
1234 
1235 	if (!ptr)
1236 		return;
1237 
1238 	spin_lock_irqsave(&pcpu_lock, flags);
1239 
1240 	chunk = pcpu_chunk_addr_search(addr);
1241 	off = addr - chunk->base_addr;
1242 
1243 	pcpu_free_area(chunk, off);
1244 
1245 	/* if there is more than one fully free chunk, wake up grim reaper */
1246 	if (chunk->free_size == pcpu_unit_size) {
1247 		struct pcpu_chunk *pos;
1248 
1249 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1250 			if (pos != chunk) {
1251 				schedule_work(&pcpu_reclaim_work);
1252 				break;
1253 			}
1254 	}
1255 
1256 	spin_unlock_irqrestore(&pcpu_lock, flags);
1257 }
1258 EXPORT_SYMBOL_GPL(free_percpu);
1259 
1260 static inline size_t pcpu_calc_fc_sizes(size_t static_size,
1261 					size_t reserved_size,
1262 					ssize_t *dyn_sizep)
1263 {
1264 	size_t size_sum;
1265 
1266 	size_sum = PFN_ALIGN(static_size + reserved_size +
1267 			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
1268 	if (*dyn_sizep != 0)
1269 		*dyn_sizep = size_sum - static_size - reserved_size;
1270 
1271 	return size_sum;
1272 }
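
/*
 * For example, with a 45000 byte static area, an 8192 byte reserved
 * area and a requested dynamic size of 20480 bytes (all numbers
 * illustrative), the 73672 byte sum is rounded up to the 73728 byte
 * page boundary (18 4k pages) and the dynamic size grows to 20536
 * bytes to soak up the padding.
 */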
1273 
1274 /**
1275  * pcpu_alloc_alloc_info - allocate percpu allocation info
1276  * @nr_groups: the number of groups
1277  * @nr_units: the number of units
1278  *
1279  * Allocate ai which is large enough for @nr_groups groups containing
1280  * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1281  * cpu_map array which is long enough for @nr_units and filled with
1282  * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
1283  * pointer of other groups.
1284  *
1285  * RETURNS:
1286  * Pointer to the allocated pcpu_alloc_info on success, NULL on
1287  * failure.
1288  */
1289 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1290 						      int nr_units)
1291 {
1292 	struct pcpu_alloc_info *ai;
1293 	size_t base_size, ai_size;
1294 	void *ptr;
1295 	int unit;
1296 
1297 	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1298 			  __alignof__(ai->groups[0].cpu_map[0]));
1299 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1300 
1301 	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1302 	if (!ptr)
1303 		return NULL;
1304 	ai = ptr;
1305 	ptr += base_size;
1306 
1307 	ai->groups[0].cpu_map = ptr;
1308 
1309 	for (unit = 0; unit < nr_units; unit++)
1310 		ai->groups[0].cpu_map[unit] = NR_CPUS;
1311 
1312 	ai->nr_groups = nr_groups;
1313 	ai->__ai_size = PFN_ALIGN(ai_size);
1314 
1315 	return ai;
1316 }
1317 
1318 /**
1319  * pcpu_free_alloc_info - free percpu allocation info
1320  * @ai: pcpu_alloc_info to free
1321  *
1322  * Free @ai which was allocated by pcpu_alloc_alloc_info().
1323  */
1324 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1325 {
1326 	free_bootmem(__pa(ai), ai->__ai_size);
1327 }
1328 
1329 /**
1330  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1331  * @reserved_size: the size of reserved percpu area in bytes
1332  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1333  * @atom_size: allocation atom size
1334  * @cpu_distance_fn: callback to determine distance between cpus, optional
1335  *
1336  * This function determines grouping of units, their mappings to cpus
1337  * and other parameters considering needed percpu size, allocation
1338  * atom size and distances between CPUs.
1339  *
1340  * Groups are always multiples of atom size and CPUs which are of
1341  * LOCAL_DISTANCE both ways are grouped together and share space for
1342  * units in the same group.  The returned configuration is guaranteed
1343  * to have CPUs on different nodes on different groups and >=75% usage
1344  * of allocated virtual address space.
1345  *
1346  * RETURNS:
1347  * On success, pointer to the new allocation_info is returned.  On
1348  * failure, ERR_PTR value is returned.
1349  */
1350 struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1351 				size_t reserved_size, ssize_t dyn_size,
1352 				size_t atom_size,
1353 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1354 {
1355 	static int group_map[NR_CPUS] __initdata;
1356 	static int group_cnt[NR_CPUS] __initdata;
1357 	const size_t static_size = __per_cpu_end - __per_cpu_start;
1358 	int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
1359 	size_t size_sum, min_unit_size, alloc_size;
1360 	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1361 	int last_allocs, group, unit;
1362 	unsigned int cpu, tcpu;
1363 	struct pcpu_alloc_info *ai;
1364 	unsigned int *cpu_map;
1365 
1366 	/* this function may be called multiple times */
1367 	memset(group_map, 0, sizeof(group_map));
1368 	memset(group_cnt, 0, sizeof(group_cnt));
1369 
1370 	/*
1371 	 * Determine min_unit_size, alloc_size and max_upa such that
1372 	 * alloc_size is multiple of atom_size and is the smallest
1373  * which can accommodate 4k aligned segments which are equal to
1374 	 * or larger than min_unit_size.
1375 	 */
1376 	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
1377 	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1378 
1379 	alloc_size = roundup(min_unit_size, atom_size);
1380 	upa = alloc_size / min_unit_size;
1381 	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1382 		upa--;
1383 	max_upa = upa;
1384 
1385 	/* group cpus according to their proximity */
1386 	for_each_possible_cpu(cpu) {
1387 		group = 0;
1388 	next_group:
1389 		for_each_possible_cpu(tcpu) {
1390 			if (cpu == tcpu)
1391 				break;
1392 			if (group_map[tcpu] == group && cpu_distance_fn &&
1393 			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1394 			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1395 				group++;
1396 				nr_groups = max(nr_groups, group + 1);
1397 				goto next_group;
1398 			}
1399 		}
1400 		group_map[cpu] = group;
1401 		group_cnt[group]++;
1402 		group_cnt_max = max(group_cnt_max, group_cnt[group]);
1403 	}
1404 
1405 	/*
1406 	 * Expand unit size until address space usage goes over 75%
1407 	 * and then as much as possible without using more address
1408 	 * space.
1409 	 */
1410 	last_allocs = INT_MAX;
1411 	for (upa = max_upa; upa; upa--) {
1412 		int allocs = 0, wasted = 0;
1413 
1414 		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1415 			continue;
1416 
1417 		for (group = 0; group < nr_groups; group++) {
1418 			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1419 			allocs += this_allocs;
1420 			wasted += this_allocs * upa - group_cnt[group];
1421 		}
1422 
1423 		/*
1424 		 * Don't accept if wastage is over 1/3.  The
1425 		 * greater-than comparison ensures upa==1 always
1426 		 * passes the following check.
1427 		 */
1428 		if (wasted > num_possible_cpus() / 3)
1429 			continue;
1430 
1431 		/* and then don't consume more memory */
1432 		if (allocs > last_allocs)
1433 			break;
1434 		last_allocs = allocs;
1435 		best_upa = upa;
1436 	}
1437 	upa = best_upa;
1438 
1439 	/* allocate and fill alloc_info */
1440 	for (group = 0; group < nr_groups; group++)
1441 		nr_units += roundup(group_cnt[group], upa);
1442 
1443 	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1444 	if (!ai)
1445 		return ERR_PTR(-ENOMEM);
1446 	cpu_map = ai->groups[0].cpu_map;
1447 
1448 	for (group = 0; group < nr_groups; group++) {
1449 		ai->groups[group].cpu_map = cpu_map;
1450 		cpu_map += roundup(group_cnt[group], upa);
1451 	}
1452 
1453 	ai->static_size = static_size;
1454 	ai->reserved_size = reserved_size;
1455 	ai->dyn_size = dyn_size;
1456 	ai->unit_size = alloc_size / upa;
1457 	ai->atom_size = atom_size;
1458 	ai->alloc_size = alloc_size;
1459 
1460 	for (group = 0, unit = 0; group_cnt[group]; group++) {
1461 		struct pcpu_group_info *gi = &ai->groups[group];
1462 
1463 		/*
1464 		 * Initialize base_offset as if all groups are located
1465 		 * back-to-back.  The caller should update this to
1466 		 * reflect actual allocation.
1467 		 */
1468 		gi->base_offset = unit * ai->unit_size;
1469 
1470 		for_each_possible_cpu(cpu)
1471 			if (group_map[cpu] == group)
1472 				gi->cpu_map[gi->nr_units++] = cpu;
1473 		gi->nr_units = roundup(gi->nr_units, upa);
1474 		unit += gi->nr_units;
1475 	}
1476 	BUG_ON(unit != nr_units);
1477 
1478 	return ai;
1479 }
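
/*
 * A short example of the unit sizing above (numbers are illustrative):
 * with size_sum == 92k the minimum unit size is 92k, and with a 2M
 * allocation atom the allocation size becomes 2M.  Starting from
 * 2M / 92k == 22 units per allocation, upa is decremented until it
 * divides 2M evenly and yields a page aligned unit, which first
 * happens at upa == 16, i.e. sixteen 128k units per 2M allocation.
 */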
1480 
1481 /**
1482  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1483  * @lvl: loglevel
1484  * @ai: allocation info to dump
1485  *
1486  * Print out information about @ai using loglevel @lvl.
1487  */
1488 static void pcpu_dump_alloc_info(const char *lvl,
1489 				 const struct pcpu_alloc_info *ai)
1490 {
1491 	int group_width = 1, cpu_width = 1, width;
1492 	char empty_str[] = "--------";
1493 	int alloc = 0, alloc_end = 0;
1494 	int group, v;
1495 	int upa, apl;	/* units per alloc, allocs per line */
1496 
1497 	v = ai->nr_groups;
1498 	while (v /= 10)
1499 		group_width++;
1500 
1501 	v = num_possible_cpus();
1502 	while (v /= 10)
1503 		cpu_width++;
1504 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1505 
1506 	upa = ai->alloc_size / ai->unit_size;
1507 	width = upa * (cpu_width + 1) + group_width + 3;
1508 	apl = rounddown_pow_of_two(max(60 / width, 1));
1509 
1510 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1511 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1512 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1513 
1514 	for (group = 0; group < ai->nr_groups; group++) {
1515 		const struct pcpu_group_info *gi = &ai->groups[group];
1516 		int unit = 0, unit_end = 0;
1517 
1518 		BUG_ON(gi->nr_units % upa);
1519 		for (alloc_end += gi->nr_units / upa;
1520 		     alloc < alloc_end; alloc++) {
1521 			if (!(alloc % apl)) {
1522 				printk("\n");
1523 				printk("%spcpu-alloc: ", lvl);
1524 			}
1525 			printk("[%0*d] ", group_width, group);
1526 
1527 			for (unit_end += upa; unit < unit_end; unit++)
1528 				if (gi->cpu_map[unit] != NR_CPUS)
1529 					printk("%0*d ", cpu_width,
1530 					       gi->cpu_map[unit]);
1531 				else
1532 					printk("%s ", empty_str);
1533 		}
1534 	}
1535 	printk("\n");
1536 }
1537 
1538 /**
1539  * pcpu_setup_first_chunk - initialize the first percpu chunk
1540  * @ai: pcpu_alloc_info describing how the percpu area is shaped
1541  * @base_addr: mapped address
1542  *
1543  * Initialize the first percpu chunk which contains the kernel static
1544  * percpu area.  This function is to be called from arch percpu area
1545  * setup path.
1546  *
1547  * @ai contains all information necessary to initialize the first
1548  * chunk and prime the dynamic percpu allocator.
1549  *
1550  * @ai->static_size is the size of static percpu area.
1551  *
1552  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1553  * reserve after the static area in the first chunk.  The reserved
1554  * region is available only through reserved percpu allocation.  This
1555  * is primarily used to serve module percpu
1556  * static areas on architectures where the addressing model has
1557  * limited offset range for symbol relocations to guarantee module
1558  * percpu symbols fall inside the relocatable range.
1559  *
1560  * @ai->dyn_size determines the number of bytes available for dynamic
1561  * allocation in the first chunk.  The area between @ai->static_size +
1562  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1563  *
1564  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1565  * and equal to or larger than @ai->static_size + @ai->reserved_size +
1566  * @ai->dyn_size.
1567  *
1568  * @ai->atom_size is the allocation atom size and used as alignment
1569  * for vm areas.
1570  *
1571  * @ai->alloc_size is the allocation size and always multiple of
1572  * @ai->atom_size.  This is larger than @ai->atom_size if
1573  * @ai->unit_size is larger than @ai->atom_size.
1574  *
1575  * @ai->nr_groups and @ai->groups describe virtual memory layout of
1576  * percpu areas.  Units which should be colocated are put into the
1577  * same group.  Dynamic VM areas will be allocated according to these
1578  * groupings.  If @ai->nr_groups is zero, a single group containing
1579  * all units is assumed.
1580  *
1581  * The caller should have mapped the first chunk at @base_addr and
1582  * copied static data to each unit.
1583  *
1584  * If the first chunk ends up with both reserved and dynamic areas, it
1585  * is served by two chunks - one to serve the core static and reserved
1586  * areas and the other for the dynamic area.  They share the same vm
1587  * and page map but use different area allocation maps to stay away
1588  * from each other.  The latter chunk is circulated in the chunk slots
1589  * and is available for dynamic allocation like any other chunk.
1590  *
1591  * RETURNS:
1592  * 0 on success, -errno on failure.
1593  */
1594 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1595 				  void *base_addr)
1596 {
1597 	static char cpus_buf[4096] __initdata;
1598 	static int smap[2], dmap[2];
1599 	size_t dyn_size = ai->dyn_size;
1600 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1601 	struct pcpu_chunk *schunk, *dchunk = NULL;
1602 	unsigned long *group_offsets;
1603 	size_t *group_sizes;
1604 	unsigned long *unit_off;
1605 	unsigned int cpu;
1606 	int *unit_map;
1607 	int group, unit, i;
1608 
1609 	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1610 
1611 #define PCPU_SETUP_BUG_ON(cond)	do {					\
1612 	if (unlikely(cond)) {						\
1613 		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
1614 		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
1615 		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1616 		BUG();							\
1617 	}								\
1618 } while (0)
1619 
1620 	/* sanity checks */
1621 	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1622 		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
1623 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1624 	PCPU_SETUP_BUG_ON(!ai->static_size);
1625 	PCPU_SETUP_BUG_ON(!base_addr);
1626 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1627 	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1628 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1629 
1630 	/* process group information and build config tables accordingly */
1631 	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1632 	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1633 	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1634 	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1635 
1636 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1637 		unit_map[cpu] = UINT_MAX;
1638 	pcpu_first_unit_cpu = NR_CPUS;
1639 
1640 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1641 		const struct pcpu_group_info *gi = &ai->groups[group];
1642 
1643 		group_offsets[group] = gi->base_offset;
1644 		group_sizes[group] = gi->nr_units * ai->unit_size;
1645 
1646 		for (i = 0; i < gi->nr_units; i++) {
1647 			cpu = gi->cpu_map[i];
1648 			if (cpu == NR_CPUS)
1649 				continue;
1650 
1651 			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
1652 			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1653 			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1654 
1655 			unit_map[cpu] = unit + i;
1656 			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1657 
1658 			if (pcpu_first_unit_cpu == NR_CPUS)
1659 				pcpu_first_unit_cpu = cpu;
1660 			pcpu_last_unit_cpu = cpu;
1661 		}
1662 	}
1663 	pcpu_nr_units = unit;
1664 
1665 	for_each_possible_cpu(cpu)
1666 		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1667 
1668 	/* we're done parsing the input, undefine BUG macro and dump config */
1669 #undef PCPU_SETUP_BUG_ON
1670 	pcpu_dump_alloc_info(KERN_INFO, ai);
1671 
1672 	pcpu_nr_groups = ai->nr_groups;
1673 	pcpu_group_offsets = group_offsets;
1674 	pcpu_group_sizes = group_sizes;
1675 	pcpu_unit_map = unit_map;
1676 	pcpu_unit_offsets = unit_off;
1677 
1678 	/* determine basic parameters */
1679 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1680 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1681 	pcpu_atom_size = ai->atom_size;
1682 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1683 		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1684 
1685 	/*
1686 	 * Allocate chunk slots.  The additional last slot is for
1687 	 * empty chunks.
1688 	 */
1689 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1690 	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1691 	for (i = 0; i < pcpu_nr_slots; i++)
1692 		INIT_LIST_HEAD(&pcpu_slot[i]);
1693 
1694 	/*
1695 	 * Initialize static chunk.  If reserved_size is zero, the
1696 	 * static chunk covers static area + dynamic allocation area
1697 	 * in the first chunk.  If reserved_size is not zero, it
1698 	 * covers static area + reserved area (mostly used for module
1699 	 * static percpu allocation).
1700 	 */
1701 	schunk = alloc_bootmem(pcpu_chunk_struct_size);
1702 	INIT_LIST_HEAD(&schunk->list);
1703 	schunk->base_addr = base_addr;
1704 	schunk->map = smap;
1705 	schunk->map_alloc = ARRAY_SIZE(smap);
1706 	schunk->immutable = true;
1707 	bitmap_fill(schunk->populated, pcpu_unit_pages);
1708 
1709 	if (ai->reserved_size) {
1710 		schunk->free_size = ai->reserved_size;
1711 		pcpu_reserved_chunk = schunk;
1712 		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1713 	} else {
1714 		schunk->free_size = dyn_size;
1715 		dyn_size = 0;			/* dynamic area covered */
1716 	}
1717 	schunk->contig_hint = schunk->free_size;
1718 
1719 	schunk->map[schunk->map_used++] = -ai->static_size;
1720 	if (schunk->free_size)
1721 		schunk->map[schunk->map_used++] = schunk->free_size;
1722 
1723 	/* init dynamic chunk if necessary */
1724 	if (dyn_size) {
1725 		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1726 		INIT_LIST_HEAD(&dchunk->list);
1727 		dchunk->base_addr = base_addr;
1728 		dchunk->map = dmap;
1729 		dchunk->map_alloc = ARRAY_SIZE(dmap);
1730 		dchunk->immutable = true;
1731 		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1732 
1733 		dchunk->contig_hint = dchunk->free_size = dyn_size;
1734 		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1735 		dchunk->map[dchunk->map_used++] = dchunk->free_size;
1736 	}
1737 
1738 	/* link the first chunk in */
1739 	pcpu_first_chunk = dchunk ?: schunk;
1740 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1741 
1742 	/* we're done */
1743 	pcpu_base_addr = base_addr;
1744 	return 0;
1745 }
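
/*
 * Worked example (illustrative numbers, not taken from any particular
 * machine): with static_size = 64k, reserved_size = 8k and dyn_size = 20k,
 * the setup above creates schunk covering the first 72k of each unit with
 * map = { -64k, 8k } and publishes it as pcpu_reserved_chunk, while dchunk
 * covers the trailing 20k with map = { -72k, 20k } and is the chunk that
 * gets linked into pcpu_slot for regular dynamic allocations
 * (pcpu_first_chunk = dchunk).
 */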
1746 
1747 const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1748 	[PCPU_FC_AUTO]	= "auto",
1749 	[PCPU_FC_EMBED]	= "embed",
1750 	[PCPU_FC_PAGE]	= "page",
1751 };
1752 
1753 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1754 
1755 static int __init percpu_alloc_setup(char *str)
1756 {
1757 	if (0)
1758 		/* nada */;
1759 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1760 	else if (!strcmp(str, "embed"))
1761 		pcpu_chosen_fc = PCPU_FC_EMBED;
1762 #endif
1763 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1764 	else if (!strcmp(str, "page"))
1765 		pcpu_chosen_fc = PCPU_FC_PAGE;
1766 #endif
1767 	else
1768 		pr_warning("PERCPU: unknown allocator %s specified\n", str);
1769 
1770 	return 0;
1771 }
1772 early_param("percpu_alloc", percpu_alloc_setup);
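
/*
 * Usage note (illustrative, not part of the original file): when more than
 * one first chunk allocator is built in, the choice can be overridden from
 * the kernel command line, e.g.
 *
 *	percpu_alloc=embed
 *	percpu_alloc=page
 *
 * Any other value emits the warning above and leaves pcpu_chosen_fc at
 * PCPU_FC_AUTO, letting the arch pick the allocator.
 */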
1773 
1774 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1775 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1776 /**
1777  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1778  * @reserved_size: the size of reserved percpu area in bytes
1779  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1780  * @atom_size: allocation atom size
1781  * @cpu_distance_fn: callback to determine distance between cpus, optional
1782  * @alloc_fn: function to allocate percpu page
1783  * @free_fn: function to free percpu page
1784  *
1785  * This is a helper to ease setting up embedded first percpu chunk and
1786  * can be called where pcpu_setup_first_chunk() is expected.
1787  *
1788  * If this function is used to setup the first chunk, it is allocated
1789  * by calling @alloc_fn and used as-is without being mapped into
1790  * vmalloc area.  Allocations are always whole multiples of @atom_size
1791  * aligned to @atom_size.
1792  *
1793  * This enables the first chunk to piggy back on the linear physical
1794  * mapping which often uses larger page size.  Please note that this
1795  * can result in very sparse cpu->unit mapping on NUMA machines thus
1796  * requiring large vmalloc address space.  Don't use this allocator if
1797  * vmalloc space is not orders of magnitude larger than distances
1798  * between node memory addresses (ie. 32bit NUMA machines).
1799  *
1800  * When @dyn_size is positive, dynamic area might be larger than
1801  * specified to fill page alignment.  When @dyn_size is auto,
1802  * @dyn_size is just big enough to fill page alignment after static
1803  * and reserved areas.
1804  *
1805  * If the needed size is smaller than the minimum or specified unit
1806  * size, the leftover is returned using @free_fn.
1807  *
1808  * RETURNS:
1809  * 0 on success, -errno on failure.
1810  */
1811 int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
1812 				  size_t atom_size,
1813 				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1814 				  pcpu_fc_alloc_fn_t alloc_fn,
1815 				  pcpu_fc_free_fn_t free_fn)
1816 {
1817 	void *base = (void *)ULONG_MAX;
1818 	void **areas = NULL;
1819 	struct pcpu_alloc_info *ai;
1820 	size_t size_sum, areas_size, max_distance;
1821 	int group, i, rc;
1822 
1823 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1824 				   cpu_distance_fn);
1825 	if (IS_ERR(ai))
1826 		return PTR_ERR(ai);
1827 
1828 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1829 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1830 
1831 	areas = alloc_bootmem_nopanic(areas_size);
1832 	if (!areas) {
1833 		rc = -ENOMEM;
1834 		goto out_free;
1835 	}
1836 
1837 	/* allocate, copy and determine base address */
1838 	for (group = 0; group < ai->nr_groups; group++) {
1839 		struct pcpu_group_info *gi = &ai->groups[group];
1840 		unsigned int cpu = NR_CPUS;
1841 		void *ptr;
1842 
1843 		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1844 			cpu = gi->cpu_map[i];
1845 		BUG_ON(cpu == NR_CPUS);
1846 
1847 		/* allocate space for the whole group */
1848 		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1849 		if (!ptr) {
1850 			rc = -ENOMEM;
1851 			goto out_free_areas;
1852 		}
1853 		areas[group] = ptr;
1854 
1855 		base = min(ptr, base);
1856 
1857 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1858 			if (gi->cpu_map[i] == NR_CPUS) {
1859 				/* unused unit, free whole */
1860 				free_fn(ptr, ai->unit_size);
1861 				continue;
1862 			}
1863 			/* copy and return the unused part */
1864 			memcpy(ptr, __per_cpu_load, ai->static_size);
1865 			free_fn(ptr + size_sum, ai->unit_size - size_sum);
1866 		}
1867 	}
1868 
1869 	/* base address is now known, determine group base offsets */
1870 	max_distance = 0;
1871 	for (group = 0; group < ai->nr_groups; group++) {
1872 		ai->groups[group].base_offset = areas[group] - base;
1873 		max_distance = max(max_distance, ai->groups[group].base_offset);
1874 	}
1875 	max_distance += ai->unit_size;
1876 
1877 	/* warn if maximum distance is further than 75% of vmalloc space */
1878 	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1879 		pr_warning("PERCPU: max_distance=0x%lx too large for vmalloc "
1880 			   "space 0x%lx\n",
1881 			   max_distance, VMALLOC_END - VMALLOC_START);
1882 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1883 		/* and fail if we have fallback */
1884 		rc = -EINVAL;
1885 		goto out_free;
1886 #endif
1887 	}
1888 
1889 	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1890 		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1891 		ai->dyn_size, ai->unit_size);
1892 
1893 	rc = pcpu_setup_first_chunk(ai, base);
1894 	goto out_free;
1895 
1896 out_free_areas:
1897 	for (group = 0; group < ai->nr_groups; group++)
1898 		if (areas[group])
1899 			free_fn(areas[group], ai->groups[group].nr_units * ai->unit_size);
1900 out_free:
1901 	pcpu_free_alloc_info(ai);
1902 	if (areas)
1903 		free_bootmem(__pa(areas), areas_size);
1904 	return rc;
1905 }
1906 #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1907 	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */
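
/*
 * Illustrative sketch of a NUMA-aware caller of pcpu_embed_first_chunk()
 * (hedged: the helper names below are placeholders modeled on typical arch
 * code, they are not defined in this file):
 *
 *	static size_t __init pcpu_cpu_distance(unsigned int from,
 *					       unsigned int to)
 *	{
 *		return cpu_to_node(from) == cpu_to_node(to) ?
 *			LOCAL_DISTANCE : REMOTE_DISTANCE;
 *	}
 *
 *	static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		return __alloc_bootmem_node_nopanic(NODE_DATA(cpu_to_node(cpu)),
 *						    size, align,
 *						    __pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init pcpu_fc_free(void *ptr, size_t size)
 *	{
 *		free_bootmem(__pa(ptr), size);
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    pcpu_cpu_distance, pcpu_fc_alloc,
 *				    pcpu_fc_free);
 *
 * Using PMD_SIZE as @atom_size lets the first chunk ride on large page
 * mappings where the arch supports them; on UMA configurations the distance
 * callback can simply be NULL as in setup_per_cpu_areas() below.
 */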
1908 
1909 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1910 /**
1911  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1912  * @reserved_size: the size of reserved percpu area in bytes
1913  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1914  * @free_fn: function to free percpu page, always called with PAGE_SIZE
1915  * @populate_pte_fn: function to populate pte
1916  *
1917  * This is a helper to ease setting up page-remapped first percpu
1918  * chunk and can be called where pcpu_setup_first_chunk() is expected.
1919  *
1920  * This is the basic allocator.  Static percpu area is allocated
1921  * page-by-page into vmalloc area.
1922  *
1923  * RETURNS:
1924  * 0 on success, -errno on failure.
1925  */
1926 int __init pcpu_page_first_chunk(size_t reserved_size,
1927 				 pcpu_fc_alloc_fn_t alloc_fn,
1928 				 pcpu_fc_free_fn_t free_fn,
1929 				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1930 {
1931 	static struct vm_struct vm;
1932 	struct pcpu_alloc_info *ai;
1933 	char psize_str[16];
1934 	int unit_pages;
1935 	size_t pages_size;
1936 	struct page **pages;
1937 	int unit, i, j, rc;
1938 
1939 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1940 
1941 	ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
1942 	if (IS_ERR(ai))
1943 		return PTR_ERR(ai);
1944 	BUG_ON(ai->nr_groups != 1);
1945 	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1946 
1947 	unit_pages = ai->unit_size >> PAGE_SHIFT;
1948 
1949 	/* unaligned allocations can't be freed, round up to page size */
1950 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1951 			       sizeof(pages[0]));
1952 	pages = alloc_bootmem(pages_size);
1953 
1954 	/* allocate pages */
1955 	j = 0;
1956 	for (unit = 0; unit < num_possible_cpus(); unit++)
1957 		for (i = 0; i < unit_pages; i++) {
1958 			unsigned int cpu = ai->groups[0].cpu_map[unit];
1959 			void *ptr;
1960 
1961 			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1962 			if (!ptr) {
1963 				pr_warning("PERCPU: failed to allocate %s page "
1964 					   "for cpu%u\n", psize_str, cpu);
1965 				goto enomem;
1966 			}
1967 			pages[j++] = virt_to_page(ptr);
1968 		}
1969 
1970 	/* allocate vm area, map the pages and copy static data */
1971 	vm.flags = VM_ALLOC;
1972 	vm.size = num_possible_cpus() * ai->unit_size;
1973 	vm_area_register_early(&vm, PAGE_SIZE);
1974 
1975 	for (unit = 0; unit < num_possible_cpus(); unit++) {
1976 		unsigned long unit_addr =
1977 			(unsigned long)vm.addr + unit * ai->unit_size;
1978 
1979 		for (i = 0; i < unit_pages; i++)
1980 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1981 
1982 		/* pte already populated, the following shouldn't fail */
1983 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1984 				      unit_pages);
1985 		if (rc < 0)
1986 			panic("failed to map percpu area, err=%d\n", rc);
1987 
1988 		/*
1989 		 * FIXME: Archs with virtual cache should flush local
1990 		 * cache for the linear mapping here - something
1991 		 * equivalent to flush_cache_vmap() on the local cpu.
1992 		 * flush_cache_vmap() can't be used as most supporting
1993 		 * data structures are not set up yet.
1994 		 */
1995 
1996 		/* copy static data */
1997 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1998 	}
1999 
2000 	/* we're ready, commit */
2001 	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
2002 		unit_pages, psize_str, vm.addr, ai->static_size,
2003 		ai->reserved_size, ai->dyn_size);
2004 
2005 	rc = pcpu_setup_first_chunk(ai, vm.addr);
2006 	goto out_free_ar;
2007 
2008 enomem:
2009 	while (--j >= 0)
2010 		free_fn(page_address(pages[j]), PAGE_SIZE);
2011 	rc = -ENOMEM;
2012 out_free_ar:
2013 	free_bootmem(__pa(pages), pages_size);
2014 	pcpu_free_alloc_info(ai);
2015 	return rc;
2016 }
2017 #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
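
/*
 * Illustrative sketch (placeholder names, not defined in this file): an
 * arch selecting the page allocator supplies page-granular alloc/free
 * helpers plus a PTE populator and calls:
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   pcpu_fc_alloc, pcpu_fc_free,
 *				   pcpu_populate_pte);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 *
 * pcpu_populate_pte(addr) is expected to pre-allocate the page table levels
 * covering @addr (from bootmem), so that the __pcpu_map_pages() call above
 * cannot fail for lack of memory.
 */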
2018 
2019 /*
2020  * Generic percpu area setup.
2021  *
2022  * The embedding helper is used because its behavior closely resembles
2023  * the original non-dynamic generic percpu area setup.  This is
2024  * important because many archs have addressing restrictions and might
2025  * fail if the percpu area is located far away from the previous
2026  * location.  As an added bonus, in non-NUMA cases, embedding is
2027  * generally a good idea TLB-wise because percpu area can piggy back
2028  * on the physical linear memory mapping which uses large page
2029  * mappings on applicable archs.
2030  */
2031 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2032 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2033 EXPORT_SYMBOL(__per_cpu_offset);
2034 
2035 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2036 				       size_t align)
2037 {
2038 	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
2039 }
2040 
2041 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2042 {
2043 	free_bootmem(__pa(ptr), size);
2044 }
2045 
2046 void __init setup_per_cpu_areas(void)
2047 {
2048 	unsigned long delta;
2049 	unsigned int cpu;
2050 	int rc;
2051 
2052 	/*
2053 	 * Always reserve area for module percpu variables.  That's
2054 	 * what the legacy allocator did.
2055 	 */
2056 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2057 				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2058 				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2059 	if (rc < 0)
2060 		panic("Failed to initialize percpu areas.");
2061 
2062 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2063 	for_each_possible_cpu(cpu)
2064 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2065 }
2066 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
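
/*
 * Illustrative note: the __per_cpu_offset[] table filled in
 * setup_per_cpu_areas() above is what the generic accessors add to the
 * address of a static percpu symbol, roughly:
 *
 *	#define per_cpu(var, cpu) \
 *		(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __per_cpu_offset[cpu]))
 *
 * so each cpu's offset is simply "first chunk base + unit offset -
 * __per_cpu_start".
 */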
2067