xref: /openbmc/linux/mm/percpu.c (revision 48c926cd)
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennisszhou@gmail.com>
 *
 * This file is released under the GPLv2 license.
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping of units to possible cpus.  These units are grouped
 * based on the NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * The first chunk needs special consideration as it must handle the
 * static percpu variables in the kernel image while allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * tries to allocate from the fullest chunk first.  Each chunk is managed
 * by a bitmap with metadata blocks.  The allocation map is updated on
 * every allocation and free to reflect the current state while the
 * boundary map is only updated on allocation.  Each metadata block
 * contains information to help mitigate the need to iterate over large
 * portions of the bitmap.  The reverse mapping from page to chunk is
 * stored in the page's index.  Lastly, units are lazily backed and grow
 * in unison.
 *
 * A conversion between bytes and bits runs throughout this file.  Each
 * bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular addresses to percpu pointers and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
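
/*
 * A worked example of the byte/bit conversion above (illustrative only,
 * assuming the usual PCPU_MIN_ALLOC_SHIFT of 2, i.e. 4-byte fragments,
 * and 4KB pages):
 *
 *	bits = size >> PCPU_MIN_ALLOC_SHIFT;	(1024 bytes -> 256 bits)
 *
 * so each page spans PAGE_SIZE >> PCPU_MIN_ALLOC_SHIFT == 1024 bits of
 * the allocation map, and a one-page chunk is managed by 1024 bits.
 */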

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr	+		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
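
/*
 * The two translations above are exact inverses.  An illustrative round
 * trip (not part of the allocator itself):
 *
 *	void __percpu *p = __addr_to_pcpu_ptr(chunk->base_addr + off);
 *	void *addr = __pcpu_ptr_to_addr(p);	(== chunk->base_addr + off)
 *
 * per_cpu_ptr() then adds a cpu's unit offset to such a pointer to
 * reach that cpu's copy of the area.
 */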

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}
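
/*
 * Slot arithmetic example (PCPU_SLOT_BASE_SHIFT == 5): a chunk with
 * 1024 bytes free has fls(1024) == 11 and therefore sorts into slot
 * 11 - 5 + 2 == 8; doubling the free space bumps it up one slot.
 */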

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || chunk->contig_bits == 0)
		return 0;

	return pcpu_size_to_slot(chunk->free_bytes);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(bitmap, end, *rs);
	*re = find_next_bit(bitmap, end, *rs + 1);
}

static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
{
	*rs = find_next_bit(bitmap, end, *rs);
	*re = find_next_zero_bit(bitmap, end, *rs + 1);
}

/*
 * Bitmap region iterators.  Iterates over the bitmap between
 * [@start, @end) in @chunk.  @rs and @re should be integer variables
 * and will be set to the start and end index of the current contiguous
 * region: clear bits for the unpop iterator and set bits for the pop
 * iterator.
 */
#define pcpu_for_each_unpop_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end));   \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))
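
/*
 * Typical usage sketch for the iterators above (mirrors their use
 * further down in this file), walking the unpopulated page regions of
 * a chunk:
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
 *				   chunk->nr_pages) {
 *		(pages [rs, re) are not backed yet)
 *	}
 */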

/*
 * The following are helper functions for accessing the bitmaps and
 * converting between chunk-wide bitmap offsets and per-block offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
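
/*
 * Conversion example: if PCPU_BITMAP_BLOCK_BITS were 1024, chunk offset
 * 2500 would map to block index 2 with block offset 452, and
 * pcpu_block_off_to_off(2, 452) reconstructs 2500.  The actual block
 * size is configuration dependent.
 */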

/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First, is there a contig_hint
		 * to check?  Second, has this hint already been checked
		 * (compare against block_off)?  Third, is this hint the
		 * same as the right contig hint?  In the last case, it
		 * spills over into the next block and should be handled
		 * by the contig area across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  It only stops at an area that can service the request.  If
 * the request fits within a block, block->first_free is used as the
 * starting offset so the request may be satisfied prior to reaching the
 * contig hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			*bits += alloc_bits + block->contig_hint_start -
				 block->first_free;
			*bit_off = pcpu_block_off_to_off(i, block->first_free);
			return;
		}

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only stops when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else
		return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_cnt_pop_pages - counts populated backing pages in range
 * @chunk: chunk of interest
 * @bit_off: start offset
 * @bits: size of area to check
 *
 * Calculates the number of populated pages in the region
 * [page_start, page_end).  This keeps track of how many empty populated
 * pages are available and decides if async work should be scheduled.
 *
 * RETURNS:
 * The number of populated pages.
 */
static inline int pcpu_cnt_pop_pages(struct pcpu_chunk *chunk, int bit_off,
				     int bits)
{
	int page_start = PFN_UP(bit_off * PCPU_MIN_ALLOC_SIZE);
	int page_end = PFN_DOWN((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	if (page_start >= page_end)
		return 0;

	/*
	 * bitmap_weight counts the number of bits set in a bitmap up to
	 * the specified number of bits.  This is counting the populated
	 * pages up to page_end and then subtracting the populated pages
	 * up to page_start to count the populated pages in
	 * [page_start, page_end).
	 */
	return bitmap_weight(chunk->populated, page_end) -
	       bitmap_weight(chunk->populated, page_start);
}
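
/*
 * Worked example (4KB pages, 4-byte allocation units): bit_off 100 and
 * bits 3000 cover bytes [400, 12400), so PFN_UP(400) == 1 and
 * PFN_DOWN(12400) == 3 yield pages [1, 3).  Only pages lying entirely
 * inside the area are counted.
 */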

/**
 * pcpu_chunk_update - updates the chunk metadata given a free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * This updates the chunk's contig hint and starting offset given a free
 * area.  If the contig hints are equal, the start with the better
 * alignment is chosen.
 */
static void pcpu_chunk_update(struct pcpu_chunk *chunk, int bit_off, int bits)
{
	if (bits > chunk->contig_bits) {
		chunk->contig_bits_start = bit_off;
		chunk->contig_bits = bits;
	} else if (bits == chunk->contig_bits && chunk->contig_bits_start &&
		   (!bit_off ||
		    __ffs(bit_off) > __ffs(chunk->contig_bits_start))) {
		/* use the start with the best alignment */
		chunk->contig_bits_start = bit_off;
	}
}
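
/*
 * The __ffs() comparison above prefers the start with more trailing
 * zero bits, i.e. the better natural alignment.  For two equally sized
 * hints starting at offsets 512 and 768, __ffs(512) == 9 beats
 * __ffs(768) == 8, so the hint at 512 is kept.
 */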

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * It also counts the populated pages and uses the delta to update the
 * global count.
 *
 * Updates:
 *      chunk->contig_bits
 *      chunk->contig_bits_start
 *      nr_empty_pop_pages (chunk and global)
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk)
{
	int bit_off, bits, nr_empty_pop_pages;

	/* clear metadata */
	chunk->contig_bits = 0;

	bit_off = chunk->first_bit;
	bits = nr_empty_pop_pages = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits) {
		pcpu_chunk_update(chunk, bit_off, bits);

		nr_empty_pop_pages += pcpu_cnt_pop_pages(chunk, bit_off, bits);
	}

	/*
	 * Keep track of nr_empty_pop_pages.
	 *
	 * The chunk maintains the previous number of free pages it held,
	 * so the delta is used to update the global counter.  The reserved
	 * chunk is not part of the free page count as it is populated at
	 * init and is special to serving reserved allocations.
	 */
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages +=
			(nr_empty_pop_pages - chunk->nr_empty_pop_pages);

	chunk->nr_empty_pop_pages = nr_empty_pop_pages;
}

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == PCPU_BITMAP_BLOCK_BITS)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (block->contig_hint_start && contig == block->contig_hint &&
		   (!start || __ffs(start) > __ffs(block->contig_hint_start))) {
		/* use the start with the best alignment */
		block->contig_hint_start = start;
	}
}

/**
 * pcpu_block_refresh_hint - refreshes the metadata of a block
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	int rs, re;	/* region start, region end */

	/* clear hints */
	block->contig_hint = 0;
	block->left_free = block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	pcpu_for_each_unpop_region(alloc_map, rs, re, block->first_free,
				   PCPU_BITMAP_BLOCK_BITS) {
		pcpu_block_update(block, rs, re);
	}
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (s_off >= s_block->contig_hint_start &&
	    s_off < s_block->contig_hint_start + s_block->contig_hint) {
		/* block contig hint is broken - scan to fix it */
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/*
			 * e_block is fully allocated - bump the pointer so
			 * the loop below resets it like an in-between block.
			 */
			e_block++;
		} else {
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->left_free = 0;
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		for (block = s_block + 1; block < e_block; block++) {
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (bit_off >= chunk->contig_bits_start &&
	    bit_off < chunk->contig_bits_start + chunk->contig_bits)
		pcpu_chunk_refresh_hint(chunk);
}

/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk->contig_bits.  chunk->contig_bits
 * may be off by up to a page, but it will never be more than the available
 * space.  If the contig hint is contained in one block, it will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to the beginning and end of the free area
	 * within their respective blocks.  This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	pcpu_block_update(s_block, start, e_off);

	/* the free spans multiple blocks */
	if (s_index != e_index) {
		/* update e_block */
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	/*
	 * Refresh chunk metadata when the free makes a page free, a block
	 * free, or spans across blocks.  The contig hint may be off by up to
	 * a page, but if the hint is contained in a block, it will be accurate
	 * with the else condition below.
	 */
	if ((ALIGN_DOWN(end, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS)) >
	     ALIGN(start, min(PCPU_BITS_PER_PAGE, PCPU_BITMAP_BLOCK_BITS))) ||
	    s_index != e_index)
		pcpu_chunk_refresh_hint(chunk);
	else
		pcpu_chunk_update(chunk, pcpu_block_off_to_off(s_index, start),
				  s_block->contig_hint);
}

/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * True if the backing pages are populated, false otherwise.  On failure,
 * @next_off is set so pcpu_find_block_fit can skip over the unpopulated
 * region.
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	int page_start, page_end, rs, re;

	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	rs = page_start;
	pcpu_next_unpop(chunk->populated, &rs, &re, page_end);
	if (rs >= page_end)
		return true;

	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}

/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region.  This iterates over the bitmap metadata blocks to
 * find an offset that will be guaranteed to fit the requirements.  It is
 * not quite first fit: if the allocation does not fit in the contig hint
 * of a block or chunk, that block or chunk is skipped.  This errs on the
 * side of caution to prevent excess iteration.  Poor alignment can cause
 * the allocator to skip over blocks and chunks that have valid free areas.
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
 */
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
			       size_t align, bool pop_only)
{
	int bit_off, bits, next_off;

	/*
	 * Check to see if the allocation can fit in the chunk's contig hint.
	 * This is an optimization to prevent scanning by assuming if it
	 * cannot fit in the global hint, there is memory pressure and creating
	 * a new chunk would happen soon.
	 */
	bit_off = ALIGN(chunk->contig_bits_start, align) -
		  chunk->contig_bits_start;
	if (bit_off + alloc_bits > chunk->contig_bits)
		return -1;

	bit_off = chunk->first_bit;
	bits = 0;
	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
						   &next_off))
			break;

		bit_off = next_off;
		bits = 0;
	}

	if (bit_off == pcpu_chunk_map_bits(chunk))
		return -1;

	return bit_off;
}
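
/*
 * Example of the contig hint pre-check above: a hint starting at bit
 * offset 6 with align 4 wastes ALIGN(6, 4) - 6 == 2 bits, so a 16-bit
 * request only fits if the hint is at least 18 bits long.
 */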

/**
 * pcpu_alloc_area - allocates an area from a pcpu_chunk
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE)
 * @start: bit_off to start searching
 *
 * This function takes in a @start offset to begin searching to fit an
 * allocation of @alloc_bits with alignment @align.  It needs to scan
 * the allocation map because, if the request fits within the block's
 * contig hint, @start will be block->first_free.  This is an attempt to
 * fill the allocation prior to breaking the contig hint.  The allocation
 * and boundary maps are updated accordingly if it confirms a valid
 * free area.
 *
 * RETURNS:
 * Allocated addr offset in @chunk on success.
 * -1 if no matching area is found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
			   size_t align, int start)
{
	size_t align_mask = (align) ? (align - 1) : 0;
	int bit_off, end, oslot;

	lockdep_assert_held(&pcpu_lock);

	oslot = pcpu_chunk_slot(chunk);

	/*
	 * Search to find a fit.
	 */
	end = start + alloc_bits + PCPU_BITMAP_BLOCK_BITS;
	bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start,
					     alloc_bits, align_mask);
	if (bit_off >= end)
		return -1;

	/* update alloc map */
	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);

	/* update boundary map */
	set_bit(bit_off, chunk->bound_map);
	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
	set_bit(bit_off + alloc_bits, chunk->bound_map);

	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	if (bit_off == chunk->first_bit)
		chunk->first_bit = find_next_zero_bit(
					chunk->alloc_map,
					pcpu_chunk_map_bits(chunk),
					bit_off + alloc_bits);

	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);

	pcpu_chunk_relocate(chunk, oslot);

	return bit_off * PCPU_MIN_ALLOC_SIZE;
}

/**
 * pcpu_free_area - frees the corresponding offset
 * @chunk: chunk of interest
 * @off: addr offset into chunk
 *
 * This function determines the size of an allocation to free using
 * the boundary bitmap and clears the allocation map.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
{
	int bit_off, bits, end, oslot;

	lockdep_assert_held(&pcpu_lock);
	pcpu_stats_area_dealloc(chunk);

	oslot = pcpu_chunk_slot(chunk);

	bit_off = off / PCPU_MIN_ALLOC_SIZE;

	/* find end index */
	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
			    bit_off + 1);
	bits = end - bit_off;
	bitmap_clear(chunk->alloc_map, bit_off, bits);

	/* update metadata */
	chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	chunk->first_bit = min(chunk->first_bit, bit_off);

	pcpu_block_update_hint_free(chunk, bit_off, bits);

	pcpu_chunk_relocate(chunk, oslot);
}
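
/*
 * How the two bitmaps cooperate (illustrative): allocating 3 bits at
 * offset 5 sets alloc_map bits 5-7 and bound_map bits 5 and 8.  On
 * free, the size is recovered without a size argument:
 *
 *	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
 *			    bit_off + 1);	(finds bit 8)
 *	bits = end - bit_off;			(8 - 5 == 3)
 */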

static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
{
	struct pcpu_block_md *md_block;

	for (md_block = chunk->md_blocks;
	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
	     md_block++) {
		md_block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
		md_block->left_free = PCPU_BITMAP_BLOCK_BITS;
		md_block->right_free = PCPU_BITMAP_BLOCK_BITS;
	}
}

/**
 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
 * @tmp_addr: the start of the region served
 * @map_size: size of the region served
 *
 * This is responsible for creating the chunks that serve the first chunk.
 * The base_addr is @tmp_addr rounded down to a page boundary while the
 * region end is rounded up.  The start and end offsets are tracked to
 * determine the region actually served.  All of this is done to keep the
 * bitmap allocator free of partial blocks.
 *
 * RETURNS:
 * Chunk serving the region at @tmp_addr of @map_size.
 */
static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
							 int map_size)
{
	struct pcpu_chunk *chunk;
	unsigned long aligned_addr, lcm_align;
	int start_offset, offset_bits, region_size, region_bits;

	/* region calculations */
	aligned_addr = tmp_addr & PAGE_MASK;

	start_offset = tmp_addr - aligned_addr;

	/*
	 * Align the end of the region with the LCM of PAGE_SIZE and
	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
	 * the other.
	 */
	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
	region_size = ALIGN(start_offset + map_size, lcm_align);

	/* allocate chunk */
	chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) +
				    BITS_TO_LONGS(region_size >> PAGE_SHIFT) *
				    sizeof(unsigned long),
				    0);

	INIT_LIST_HEAD(&chunk->list);

	chunk->base_addr = (void *)aligned_addr;
	chunk->start_offset = start_offset;
	chunk->end_offset = region_size - chunk->start_offset - map_size;

	chunk->nr_pages = region_size >> PAGE_SHIFT;
	region_bits = pcpu_chunk_map_bits(chunk);

	chunk->alloc_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits) *
					       sizeof(chunk->alloc_map[0]), 0);
	chunk->bound_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits + 1) *
					       sizeof(chunk->bound_map[0]), 0);
	chunk->md_blocks = memblock_virt_alloc(pcpu_chunk_nr_blocks(chunk) *
					       sizeof(chunk->md_blocks[0]), 0);
	pcpu_init_md_blocks(chunk);

	/* manage populated page bitmap */
	chunk->immutable = true;
	bitmap_fill(chunk->populated, chunk->nr_pages);
	chunk->nr_populated = chunk->nr_pages;
	chunk->nr_empty_pop_pages =
		pcpu_cnt_pop_pages(chunk, start_offset / PCPU_MIN_ALLOC_SIZE,
				   map_size / PCPU_MIN_ALLOC_SIZE);

	chunk->contig_bits = map_size / PCPU_MIN_ALLOC_SIZE;
	chunk->free_bytes = map_size;

	if (chunk->start_offset) {
		/* hide the beginning of the bitmap */
		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map, 0, offset_bits);
		set_bit(0, chunk->bound_map);
		set_bit(offset_bits, chunk->bound_map);

		chunk->first_bit = offset_bits;

		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
	}

	if (chunk->end_offset) {
		/* hide the end of the bitmap */
		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
		bitmap_set(chunk->alloc_map,
			   pcpu_chunk_map_bits(chunk) - offset_bits,
			   offset_bits);
		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
			chunk->bound_map);
		set_bit(region_bits, chunk->bound_map);

		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
					     - offset_bits, offset_bits);
	}

	return chunk;
}
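
/*
 * Region example (4KB pages, lcm_align of 4KB assumed): a tmp_addr
 * ending in 0x1800 with map_size 0x5000 yields an aligned_addr ending
 * in 0x1000, start_offset 0x800, region_size ALIGN(0x5800, 0x1000) ==
 * 0x6000, and therefore end_offset 0x800 hidden at the bitmap's tail.
 */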

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;
	int region_bits;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->nr_pages = pcpu_unit_pages;
	region_bits = pcpu_chunk_map_bits(chunk);

	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
					   sizeof(chunk->alloc_map[0]));
	if (!chunk->alloc_map)
		goto alloc_map_fail;

	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
					   sizeof(chunk->bound_map[0]));
	if (!chunk->bound_map)
		goto bound_map_fail;

	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
					   sizeof(chunk->md_blocks[0]));
	if (!chunk->md_blocks)
		goto md_blocks_fail;

	pcpu_init_md_blocks(chunk);

	/* init metadata */
	chunk->contig_bits = region_bits;
	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;

	return chunk;

md_blocks_fail:
	pcpu_mem_free(chunk->bound_map);
bound_map_fail:
	pcpu_mem_free(chunk->alloc_map);
alloc_map_fail:
	pcpu_mem_free(chunk);

	return NULL;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->md_blocks);
	pcpu_mem_free(chunk->bound_map);
	pcpu_mem_free(chunk->alloc_map);
	pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 * @for_alloc: if this is to populate for allocation
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 *
 * If this is @for_alloc, do not increment pcpu_nr_empty_pop_pages because it
 * is to serve an allocation in that area.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
				 int page_end, bool for_alloc)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_set(chunk->populated, page_start, nr);
	chunk->nr_populated += nr;

	if (!for_alloc) {
		chunk->nr_empty_pop_pages += nr;
		pcpu_nr_empty_pop_pages += nr;
	}
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
				   int page_start, int page_end)
{
	int nr = page_end - page_start;

	lockdep_assert_held(&pcpu_lock);

	bitmap_clear(chunk->populated, page_start, nr);
	chunk->nr_populated -= nr;
	chunk->nr_empty_pop_pages -= nr;
	pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate an address to its struct page
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * This is an internal function that handles all but static allocations.
 * Static percpu address values should never be passed into the allocator.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the dynamic region (first chunk)? */
	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
		return pcpu_first_chunk;

	/* is it in the reserved region? */
	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
		return pcpu_reserved_chunk;

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
				 gfp_t gfp)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
	int slot, off, cpu, ret;
	unsigned long flags;
	void __percpu *ptr;
	size_t bits, bit_align;

	/*
	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
	 * therefore alignment must be a minimum of that many bytes.
	 * An allocation may have internal fragmentation of up to
	 * PCPU_MIN_ALLOC_SIZE - 1 bytes from rounding up.
	 */
	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
		align = PCPU_MIN_ALLOC_SIZE;

	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
	bits = size >> PCPU_MIN_ALLOC_SHIFT;
	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
		     !is_power_of_2(align))) {
		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
		     size, align);
		return NULL;
	}

	if (!is_atomic)
		mutex_lock(&pcpu_alloc_mutex);

	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
		if (off < 0) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		off = pcpu_alloc_area(chunk, bits, bit_align, off);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			off = pcpu_find_block_fit(chunk, bits, bit_align,
						  is_atomic);
			if (off < 0)
				continue;

			off = pcpu_alloc_area(chunk, bits, bit_align, off);
			if (off >= 0)
				goto area_found;
		}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * No space left.  Create a new chunk.  We don't want multiple
	 * tasks to create chunks simultaneously.  Serialize and create iff
	 * there's still no empty chunk after grabbing the mutex.
	 */
	if (is_atomic) {
		err = "atomic alloc failed, no space left";
		goto fail;
	}

	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
		chunk = pcpu_create_chunk();
		if (!chunk) {
			err = "failed to allocate new chunk";
			goto fail;
		}

		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_chunk_relocate(chunk, -1);
	} else {
		spin_lock_irqsave(&pcpu_lock, flags);
	}

	goto restart;

area_found:
	pcpu_stats_area_alloc(chunk, size);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate if not all pages are already there */
	if (!is_atomic) {
		int page_start, page_end, rs, re;

		page_start = PFN_DOWN(off);
		page_end = PFN_UP(off + size);

		pcpu_for_each_unpop_region(chunk->populated, rs, re,
					   page_start, page_end) {
			WARN_ON(chunk->immutable);

			ret = pcpu_populate_chunk(chunk, rs, re);

			spin_lock_irqsave(&pcpu_lock, flags);
			if (ret) {
				pcpu_free_area(chunk, off);
				err = "failed to populate";
				goto fail_unlock;
			}
			pcpu_chunk_populated(chunk, rs, re, true);
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}

		mutex_unlock(&pcpu_alloc_mutex);
	}

	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
		pcpu_schedule_balance_work();

	/* clear the areas and return address relative to base address */
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
	kmemleak_alloc_percpu(ptr, size, gfp);

	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
			chunk->base_addr, off, ptr);

	return ptr;

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);

	if (!is_atomic && warn_limit) {
		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
			size, align, is_atomic, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("limit reached, disable warning\n");
	}
	if (is_atomic) {
		/* see the flag handling in pcpu_balance_workfn() */
		pcpu_atomic_alloc_failed = true;
		pcpu_schedule_balance_work();
	} else {
		mutex_unlock(&pcpu_alloc_mutex);
	}
	return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
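
/*
 * Caller-side usage sketch (hypothetical snippet, not part of this
 * file): a subsystem typically allocates through the alloc_percpu()
 * wrapper, updates its own cpu's copy, and eventually releases the
 * area:
 *
 *	int __percpu *cnt = alloc_percpu(int);
 *
 *	if (cnt) {
 *		this_cpu_inc(*cnt);
 *		free_percpu(cnt);
 *	}
 */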

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true, GFP_KERNEL);
}
1550 
1551 /**
1552  * pcpu_balance_workfn - manage the amount of free chunks and populated pages
1553  * @work: unused
1554  *
1555  * Reclaim all fully free chunks except for the first one.
1556  */
1557 static void pcpu_balance_workfn(struct work_struct *work)
1558 {
1559 	LIST_HEAD(to_free);
1560 	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1561 	struct pcpu_chunk *chunk, *next;
1562 	int slot, nr_to_pop, ret;
1563 
1564 	/*
1565 	 * There's no reason to keep around multiple unused chunks and VM
1566 	 * areas can be scarce.  Destroy all free chunks except for one.
1567 	 */
1568 	mutex_lock(&pcpu_alloc_mutex);
1569 	spin_lock_irq(&pcpu_lock);
1570 
1571 	list_for_each_entry_safe(chunk, next, free_head, list) {
1572 		WARN_ON(chunk->immutable);
1573 
1574 		/* spare the first one */
1575 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1576 			continue;
1577 
1578 		list_move(&chunk->list, &to_free);
1579 	}
1580 
1581 	spin_unlock_irq(&pcpu_lock);
1582 
1583 	list_for_each_entry_safe(chunk, next, &to_free, list) {
1584 		int rs, re;
1585 
1586 		pcpu_for_each_pop_region(chunk->populated, rs, re, 0,
1587 					 chunk->nr_pages) {
1588 			pcpu_depopulate_chunk(chunk, rs, re);
1589 			spin_lock_irq(&pcpu_lock);
1590 			pcpu_chunk_depopulated(chunk, rs, re);
1591 			spin_unlock_irq(&pcpu_lock);
1592 		}
1593 		pcpu_destroy_chunk(chunk);
1594 	}
1595 
1596 	/*
1597 	 * Ensure there are certain number of free populated pages for
1598 	 * atomic allocs.  Fill up from the most packed so that atomic
1599 	 * allocs don't increase fragmentation.  If atomic allocation
1600 	 * failed previously, always populate the maximum amount.  This
1601 	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
1602 	 * failing indefinitely; however, large atomic allocs are not
1603 	 * something we support properly and can be highly unreliable and
1604 	 * inefficient.
1605 	 */
1606 retry_pop:
1607 	if (pcpu_atomic_alloc_failed) {
1608 		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1609 		/* best effort anyway, don't worry about synchronization */
1610 		pcpu_atomic_alloc_failed = false;
1611 	} else {
1612 		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
1613 				  pcpu_nr_empty_pop_pages,
1614 				  0, PCPU_EMPTY_POP_PAGES_HIGH);
1615 	}
1616 
1617 	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1618 		int nr_unpop = 0, rs, re;
1619 
1620 		if (!nr_to_pop)
1621 			break;
1622 
1623 		spin_lock_irq(&pcpu_lock);
1624 		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1625 			nr_unpop = chunk->nr_pages - chunk->nr_populated;
1626 			if (nr_unpop)
1627 				break;
1628 		}
1629 		spin_unlock_irq(&pcpu_lock);
1630 
1631 		if (!nr_unpop)
1632 			continue;
1633 
1634 		/* @chunk can't go away while pcpu_alloc_mutex is held */
1635 		pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
1636 					   chunk->nr_pages) {
1637 			int nr = min(re - rs, nr_to_pop);
1638 
1639 			ret = pcpu_populate_chunk(chunk, rs, rs + nr);
1640 			if (!ret) {
1641 				nr_to_pop -= nr;
1642 				spin_lock_irq(&pcpu_lock);
1643 				pcpu_chunk_populated(chunk, rs, rs + nr, false);
1644 				spin_unlock_irq(&pcpu_lock);
1645 			} else {
1646 				nr_to_pop = 0;
1647 			}
1648 
1649 			if (!nr_to_pop)
1650 				break;
1651 		}
1652 	}
1653 
1654 	if (nr_to_pop) {
1655 		/* ran out of chunks to populate, create a new one and retry */
1656 		chunk = pcpu_create_chunk();
1657 		if (chunk) {
1658 			spin_lock_irq(&pcpu_lock);
1659 			pcpu_chunk_relocate(chunk, -1);
1660 			spin_unlock_irq(&pcpu_lock);
1661 			goto retry_pop;
1662 		}
1663 	}
1664 
1665 	mutex_unlock(&pcpu_alloc_mutex);
1666 }
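/*
 * Worked example for the nr_to_pop computation in pcpu_balance_workfn()
 * above (the numbers are illustrative only): with
 * PCPU_EMPTY_POP_PAGES_HIGH == 4 and pcpu_nr_empty_pop_pages == 1, the
 * clamp yields nr_to_pop == 3 and the loop populates at most three more
 * empty pages.  After a failed atomic allocation, the full high
 * watermark (4 in this example) is repopulated regardless of the
 * current count.
 */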
1667 
1668 /**
1669  * free_percpu - free percpu area
1670  * @ptr: pointer to area to free
1671  *
1672  * Free percpu area @ptr.  If @ptr is NULL, no operation is performed.
1673  *
1674  * CONTEXT:
1675  * Can be called from atomic context.
1676  */
1677 void free_percpu(void __percpu *ptr)
1678 {
1679 	void *addr;
1680 	struct pcpu_chunk *chunk;
1681 	unsigned long flags;
1682 	int off;
1683 
1684 	if (!ptr)
1685 		return;
1686 
1687 	kmemleak_free_percpu(ptr);
1688 
1689 	addr = __pcpu_ptr_to_addr(ptr);
1690 
1691 	spin_lock_irqsave(&pcpu_lock, flags);
1692 
1693 	chunk = pcpu_chunk_addr_search(addr);
1694 	off = addr - chunk->base_addr;
1695 
1696 	pcpu_free_area(chunk, off);
1697 
1698 	/* if there is more than one fully free chunk, wake up the grim reaper */
1699 	if (chunk->free_bytes == pcpu_unit_size) {
1700 		struct pcpu_chunk *pos;
1701 
1702 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1703 			if (pos != chunk) {
1704 				pcpu_schedule_balance_work();
1705 				break;
1706 			}
1707 	}
1708 
1709 	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
1710 
1711 	spin_unlock_irqrestore(&pcpu_lock, flags);
1712 }
1713 EXPORT_SYMBOL_GPL(free_percpu);
1714 
1715 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
1716 {
1717 #ifdef CONFIG_SMP
1718 	const size_t static_size = __per_cpu_end - __per_cpu_start;
1719 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1720 	unsigned int cpu;
1721 
1722 	for_each_possible_cpu(cpu) {
1723 		void *start = per_cpu_ptr(base, cpu);
1724 		void *va = (void *)addr;
1725 
1726 		if (va >= start && va < start + static_size) {
1727 			if (can_addr) {
1728 				*can_addr = (unsigned long) (va - start);
1729 				*can_addr += (unsigned long)
1730 					per_cpu_ptr(base, get_boot_cpu_id());
1731 			}
1732 			return true;
1733 		}
1734 	}
1735 #endif
1736 	/* on UP, can't distinguish from other static vars, always false */
1737 	return false;
1738 }
1739 
1740 /**
1741  * is_kernel_percpu_address - test whether address is from static percpu area
1742  * @addr: address to test
1743  *
1744  * Test whether @addr belongs to in-kernel static percpu area.  Module
1745  * static percpu areas are not considered.  For those, use
1746  * is_module_percpu_address().
1747  *
1748  * RETURNS:
1749  * %true if @addr is from in-kernel static percpu area, %false otherwise.
1750  */
1751 bool is_kernel_percpu_address(unsigned long addr)
1752 {
1753 	return __is_kernel_percpu_address(addr, NULL);
1754 }
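/*
 * Usage sketch (illustrative): callers, e.g. lock debugging code, can
 * use this to decide whether an address should be treated like a static
 * kernel object:
 *
 *	if (is_kernel_percpu_address((unsigned long)ptr))
 *		(ptr lives in the kernel image's static percpu area)
 */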
1755 
1756 /**
1757  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1758  * @addr: the address to be converted to physical address
1759  *
1760  * Given @addr, a dereferenceable address obtained via one of the
1761  * percpu access macros, this function translates it into its physical
1762  * address.  The caller is responsible for ensuring @addr stays valid
1763  * until this function finishes.
1764  *
1765  * The percpu allocator has a special setup for the first chunk, which
1766  * currently supports either embedding in the linear address space or a
1767  * vmalloc mapping; from the second chunk on, the backing allocator
1768  * (currently either vm or km) provides the translation.
1769  *
1770  * @addr could be translated without checking whether it falls into the
1771  * first chunk, but the current code better reflects how the percpu
1772  * allocator actually works, and the verification can discover bugs both
1773  * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
1774  * So we keep the current code.
1775  *
1776  * RETURNS:
1777  * The physical address for @addr.
1778  */
1779 phys_addr_t per_cpu_ptr_to_phys(void *addr)
1780 {
1781 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1782 	bool in_first_chunk = false;
1783 	unsigned long first_low, first_high;
1784 	unsigned int cpu;
1785 
1786 	/*
1787 	 * The following test on unit_low/high isn't strictly
1788 	 * necessary but will speed up lookups of addresses which
1789 	 * aren't in the first chunk.
1790 	 *
1791 	 * The address check is against full chunk sizes.  pcpu_base_addr
1792 	 * points to the beginning of the first chunk including the
1793 	 * static region.  Assumes good intent as the first chunk may
1794 	 * not be full (ie. < pcpu_unit_pages in size).
1795 	 */
1796 	first_low = (unsigned long)pcpu_base_addr +
1797 		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
1798 	first_high = (unsigned long)pcpu_base_addr +
1799 		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
1800 	if ((unsigned long)addr >= first_low &&
1801 	    (unsigned long)addr < first_high) {
1802 		for_each_possible_cpu(cpu) {
1803 			void *start = per_cpu_ptr(base, cpu);
1804 
1805 			if (addr >= start && addr < start + pcpu_unit_size) {
1806 				in_first_chunk = true;
1807 				break;
1808 			}
1809 		}
1810 	}
1811 
1812 	if (in_first_chunk) {
1813 		if (!is_vmalloc_addr(addr))
1814 			return __pa(addr);
1815 		else
1816 			return page_to_phys(vmalloc_to_page(addr)) +
1817 			       offset_in_page(addr);
1818 	} else
1819 		return page_to_phys(pcpu_addr_to_page(addr)) +
1820 		       offset_in_page(addr);
1821 }
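/*
 * Usage sketch (illustrative; @st is a hypothetical percpu pointer):
 * the argument must already be dereferenceable, i.e. obtained through a
 * percpu accessor rather than passed as a raw __percpu pointer:
 *
 *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(st, cpu));
 */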
1822 
1823 /**
1824  * pcpu_alloc_alloc_info - allocate percpu allocation info
1825  * @nr_groups: the number of groups
1826  * @nr_units: the number of units
1827  *
1828  * Allocate ai which is large enough for @nr_groups groups containing
1829  * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1830  * cpu_map array which is long enough for @nr_units and filled with
1831  * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
1832  * pointers of the other groups.
1833  *
1834  * RETURNS:
1835  * Pointer to the allocated pcpu_alloc_info on success, NULL on
1836  * failure.
1837  */
1838 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1839 						      int nr_units)
1840 {
1841 	struct pcpu_alloc_info *ai;
1842 	size_t base_size, ai_size;
1843 	void *ptr;
1844 	int unit;
1845 
1846 	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1847 			  __alignof__(ai->groups[0].cpu_map[0]));
1848 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1849 
1850 	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
1851 	if (!ptr)
1852 		return NULL;
1853 	ai = ptr;
1854 	ptr += base_size;
1855 
1856 	ai->groups[0].cpu_map = ptr;
1857 
1858 	for (unit = 0; unit < nr_units; unit++)
1859 		ai->groups[0].cpu_map[unit] = NR_CPUS;
1860 
1861 	ai->nr_groups = nr_groups;
1862 	ai->__ai_size = PFN_ALIGN(ai_size);
1863 
1864 	return ai;
1865 }
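/*
 * Usage sketch (illustrative): after allocation, the caller points the
 * cpu_map of each subsequent group into the shared array, the same way
 * pcpu_build_alloc_info() below does:
 *
 *	unsigned int *cpu_map = ai->groups[0].cpu_map;
 *
 *	for (group = 0; group < nr_groups; group++) {
 *		ai->groups[group].cpu_map = cpu_map;
 *		cpu_map += roundup(group_cnt[group], upa);
 *	}
 */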
1866 
1867 /**
1868  * pcpu_free_alloc_info - free percpu allocation info
1869  * @ai: pcpu_alloc_info to free
1870  *
1871  * Free @ai which was allocated by pcpu_alloc_alloc_info().
1872  */
1873 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1874 {
1875 	memblock_free_early(__pa(ai), ai->__ai_size);
1876 }
1877 
1878 /**
1879  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1880  * @lvl: loglevel
1881  * @ai: allocation info to dump
1882  *
1883  * Print out information about @ai using loglevel @lvl.
1884  */
1885 static void pcpu_dump_alloc_info(const char *lvl,
1886 				 const struct pcpu_alloc_info *ai)
1887 {
1888 	int group_width = 1, cpu_width = 1, width;
1889 	char empty_str[] = "--------";
1890 	int alloc = 0, alloc_end = 0;
1891 	int group, v;
1892 	int upa, apl;	/* units per alloc, allocs per line */
1893 
1894 	v = ai->nr_groups;
1895 	while (v /= 10)
1896 		group_width++;
1897 
1898 	v = num_possible_cpus();
1899 	while (v /= 10)
1900 		cpu_width++;
1901 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1902 
1903 	upa = ai->alloc_size / ai->unit_size;
1904 	width = upa * (cpu_width + 1) + group_width + 3;
1905 	apl = rounddown_pow_of_two(max(60 / width, 1));
1906 
1907 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1908 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1909 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1910 
1911 	for (group = 0; group < ai->nr_groups; group++) {
1912 		const struct pcpu_group_info *gi = &ai->groups[group];
1913 		int unit = 0, unit_end = 0;
1914 
1915 		BUG_ON(gi->nr_units % upa);
1916 		for (alloc_end += gi->nr_units / upa;
1917 		     alloc < alloc_end; alloc++) {
1918 			if (!(alloc % apl)) {
1919 				pr_cont("\n");
1920 				printk("%spcpu-alloc: ", lvl);
1921 			}
1922 			pr_cont("[%0*d] ", group_width, group);
1923 
1924 			for (unit_end += upa; unit < unit_end; unit++)
1925 				if (gi->cpu_map[unit] != NR_CPUS)
1926 					pr_cont("%0*d ",
1927 						cpu_width, gi->cpu_map[unit]);
1928 				else
1929 					pr_cont("%s ", empty_str);
1930 		}
1931 	}
1932 	pr_cont("\n");
1933 }
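/*
 * The output resembles the following (one possible layout; the values
 * are illustrative only):
 *
 *	pcpu-alloc: s131072 r8192 d28672 u524288 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3 [0] 4 5 6 7
 *
 * i.e. the static/reserved/dynamic/unit sizes followed by one [group]
 * entry per allocation listing the cpu owning each unit.
 */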
1934 
1935 /**
1936  * pcpu_setup_first_chunk - initialize the first percpu chunk
1937  * @ai: pcpu_alloc_info describing how the percpu area is shaped
1938  * @base_addr: mapped address
1939  *
1940  * Initialize the first percpu chunk which contains the kernel static
1941  * percpu area.  This function is to be called from arch percpu
1942  * setup path.
1943  *
1944  * @ai contains all information necessary to initialize the first
1945  * chunk and prime the dynamic percpu allocator.
1946  *
1947  * @ai->static_size is the size of static percpu area.
1948  *
1949  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1950  * reserve after the static area in the first chunk.  This reserves
1951  * part of the first chunk so that it's available only through reserved
1952  * percpu allocation.  This is primarily used to serve module percpu
1953  * static areas on architectures where the addressing model has
1954  * limited offset range for symbol relocations to guarantee module
1955  * percpu symbols fall inside the relocatable range.
1956  *
1957  * @ai->dyn_size determines the number of bytes available for dynamic
1958  * allocation in the first chunk.  The area between @ai->static_size +
1959  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1960  *
1961  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1962  * and equal to or larger than @ai->static_size + @ai->reserved_size +
1963  * @ai->dyn_size.
1964  *
1965  * @ai->atom_size is the allocation atom size and is used as the
1966  * alignment for vm areas.
1967  *
1968  * @ai->alloc_size is the allocation size and is always a multiple of
1969  * @ai->atom_size.  This is larger than @ai->atom_size if
1970  * @ai->unit_size is larger than @ai->atom_size.
1971  *
1972  * @ai->nr_groups and @ai->groups describe virtual memory layout of
1973  * percpu areas.  Units which should be colocated are put into the
1974  * same group.  Dynamic VM areas will be allocated according to these
1975  * groupings.  If @ai->nr_groups is zero, a single group containing
1976  * all units is assumed.
1977  *
1978  * The caller should have mapped the first chunk at @base_addr and
1979  * copied static data to each unit.
1980  *
1981  * The first chunk will always contain a static and a dynamic region.
1982  * However, the static region is not managed by any chunk.  If the first
1983  * chunk also contains a reserved region, it is served by two chunks -
1984  * one for the reserved region and one for the dynamic region.  They
1985  * share the same vm, but use offset regions in the area allocation map.
1986  * The chunk serving the dynamic region is circulated in the chunk slots
1987  * and available for dynamic allocation like any other chunk.
1988  *
1989  * RETURNS:
1990  * 0 on success, -errno on failure.
1991  */
1992 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1993 				  void *base_addr)
1994 {
1995 	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1996 	size_t static_size, dyn_size;
1997 	struct pcpu_chunk *chunk;
1998 	unsigned long *group_offsets;
1999 	size_t *group_sizes;
2000 	unsigned long *unit_off;
2001 	unsigned int cpu;
2002 	int *unit_map;
2003 	int group, unit, i;
2004 	int map_size;
2005 	unsigned long tmp_addr;
2006 
2007 #define PCPU_SETUP_BUG_ON(cond)	do {					\
2008 	if (unlikely(cond)) {						\
2009 		pr_emerg("failed to initialize, %s\n", #cond);		\
2010 		pr_emerg("cpu_possible_mask=%*pb\n",			\
2011 			 cpumask_pr_args(cpu_possible_mask));		\
2012 		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
2013 		BUG();							\
2014 	}								\
2015 } while (0)
2016 
2017 	/* sanity checks */
2018 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2019 #ifdef CONFIG_SMP
2020 	PCPU_SETUP_BUG_ON(!ai->static_size);
2021 	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2022 #endif
2023 	PCPU_SETUP_BUG_ON(!base_addr);
2024 	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2025 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2026 	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2027 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2028 	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2029 	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2030 	PCPU_SETUP_BUG_ON(!ai->dyn_size);
2031 	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2032 	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2033 			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2034 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2035 
2036 	/* process group information and build config tables accordingly */
2037 	group_offsets = memblock_virt_alloc(ai->nr_groups *
2038 					     sizeof(group_offsets[0]), 0);
2039 	group_sizes = memblock_virt_alloc(ai->nr_groups *
2040 					   sizeof(group_sizes[0]), 0);
2041 	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
2042 	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
2043 
2044 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2045 		unit_map[cpu] = UINT_MAX;
2046 
2047 	pcpu_low_unit_cpu = NR_CPUS;
2048 	pcpu_high_unit_cpu = NR_CPUS;
2049 
2050 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2051 		const struct pcpu_group_info *gi = &ai->groups[group];
2052 
2053 		group_offsets[group] = gi->base_offset;
2054 		group_sizes[group] = gi->nr_units * ai->unit_size;
2055 
2056 		for (i = 0; i < gi->nr_units; i++) {
2057 			cpu = gi->cpu_map[i];
2058 			if (cpu == NR_CPUS)
2059 				continue;
2060 
2061 			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2062 			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2063 			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2064 
2065 			unit_map[cpu] = unit + i;
2066 			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2067 
2068 			/* determine low/high unit_cpu */
2069 			if (pcpu_low_unit_cpu == NR_CPUS ||
2070 			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2071 				pcpu_low_unit_cpu = cpu;
2072 			if (pcpu_high_unit_cpu == NR_CPUS ||
2073 			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2074 				pcpu_high_unit_cpu = cpu;
2075 		}
2076 	}
2077 	pcpu_nr_units = unit;
2078 
2079 	for_each_possible_cpu(cpu)
2080 		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2081 
2082 	/* we're done parsing the input, undefine BUG macro and dump config */
2083 #undef PCPU_SETUP_BUG_ON
2084 	pcpu_dump_alloc_info(KERN_DEBUG, ai);
2085 
2086 	pcpu_nr_groups = ai->nr_groups;
2087 	pcpu_group_offsets = group_offsets;
2088 	pcpu_group_sizes = group_sizes;
2089 	pcpu_unit_map = unit_map;
2090 	pcpu_unit_offsets = unit_off;
2091 
2092 	/* determine basic parameters */
2093 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2094 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2095 	pcpu_atom_size = ai->atom_size;
2096 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
2097 		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
2098 
2099 	pcpu_stats_save_ai(ai);
2100 
2101 	/*
2102 	 * Allocate chunk slots.  The additional last slot is for
2103 	 * empty chunks.
2104 	 */
2105 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
2106 	pcpu_slot = memblock_virt_alloc(
2107 			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
2108 	for (i = 0; i < pcpu_nr_slots; i++)
2109 		INIT_LIST_HEAD(&pcpu_slot[i]);
2110 
2111 	/*
2112 	 * The end of the static region needs to be aligned with the
2113 	 * minimum allocation size as this offsets the reserved and
2114 	 * dynamic region.  The first chunk ends page aligned by
2115 	 * expanding the dynamic region, therefore the dynamic region
2116 	 * can be shrunk to compensate while still staying above the
2117 	 * configured sizes.
2118 	 */
2119 	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2120 	dyn_size = ai->dyn_size - (static_size - ai->static_size);
2121 
2122 	/*
2123 	 * Initialize first chunk.
2124 	 * If the reserved_size is non-zero, this initializes the reserved
2125 	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
2126 	 * and the dynamic region is initialized here.  The first chunk,
2127 	 * pcpu_first_chunk, will always point to the chunk that serves
2128 	 * the dynamic region.
2129 	 */
2130 	tmp_addr = (unsigned long)base_addr + static_size;
2131 	map_size = ai->reserved_size ?: dyn_size;
2132 	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2133 
2134 	/* init dynamic chunk if necessary */
2135 	if (ai->reserved_size) {
2136 		pcpu_reserved_chunk = chunk;
2137 
2138 		tmp_addr = (unsigned long)base_addr + static_size +
2139 			   ai->reserved_size;
2140 		map_size = dyn_size;
2141 		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2142 	}
2143 
2144 	/* link the first chunk in */
2145 	pcpu_first_chunk = chunk;
2146 	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2147 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
2148 
2149 	pcpu_stats_chunk_alloc();
2150 	trace_percpu_create_chunk(base_addr);
2151 
2152 	/* we're done */
2153 	pcpu_base_addr = base_addr;
2154 	return 0;
2155 }
2156 
2157 #ifdef CONFIG_SMP
2158 
2159 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2160 	[PCPU_FC_AUTO]	= "auto",
2161 	[PCPU_FC_EMBED]	= "embed",
2162 	[PCPU_FC_PAGE]	= "page",
2163 };
2164 
2165 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2166 
2167 static int __init percpu_alloc_setup(char *str)
2168 {
2169 	if (!str)
2170 		return -EINVAL;
2171 
2172 	if (0)
2173 		/* nada */;
2174 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2175 	else if (!strcmp(str, "embed"))
2176 		pcpu_chosen_fc = PCPU_FC_EMBED;
2177 #endif
2178 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2179 	else if (!strcmp(str, "page"))
2180 		pcpu_chosen_fc = PCPU_FC_PAGE;
2181 #endif
2182 	else
2183 		pr_warn("unknown allocator %s specified\n", str);
2184 
2185 	return 0;
2186 }
2187 early_param("percpu_alloc", percpu_alloc_setup);
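/*
 * Example: the first chunk allocator can be overridden on the kernel
 * command line, e.g.
 *
 *	percpu_alloc=page
 *
 * which selects the page-by-page allocator where "embed" (the default
 * choice under "auto") would otherwise be used.
 */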
2188 
2189 /*
2190  * pcpu_embed_first_chunk() is used by the generic percpu setup.
2191  * Build it if it's needed by the arch config or if the generic setup
2192  * is going to be used.
2193  */
2194 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2195 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2196 #define BUILD_EMBED_FIRST_CHUNK
2197 #endif
2198 
2199 /* build pcpu_page_first_chunk() iff needed by the arch config */
2200 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2201 #define BUILD_PAGE_FIRST_CHUNK
2202 #endif
2203 
2204 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
2205 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2206 /**
2207  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2208  * @reserved_size: the size of reserved percpu area in bytes
2209  * @dyn_size: minimum free size for dynamic allocation in bytes
2210  * @atom_size: allocation atom size
2211  * @cpu_distance_fn: callback to determine distance between cpus, optional
2212  *
2213  * This function determines grouping of units, their mappings to cpus
2214  * and other parameters considering needed percpu size, allocation
2215  * atom size and distances between CPUs.
2216  *
2217  * Groups are always multiples of atom size, and CPUs which are within
2218  * LOCAL_DISTANCE of each other in both directions are grouped together
2219  * and share space for units in the same group.  The returned config is
2220  * guaranteed to place CPUs on different nodes in different groups and
2221  * to use >=75% of the allocated virtual address space.
2222  *
2223  * RETURNS:
2224  * On success, a pointer to the new pcpu_alloc_info is returned.  On
2225  * failure, an ERR_PTR value is returned.
2226  */
2227 static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
2228 				size_t reserved_size, size_t dyn_size,
2229 				size_t atom_size,
2230 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2231 {
2232 	static int group_map[NR_CPUS] __initdata;
2233 	static int group_cnt[NR_CPUS] __initdata;
2234 	const size_t static_size = __per_cpu_end - __per_cpu_start;
2235 	int nr_groups = 1, nr_units = 0;
2236 	size_t size_sum, min_unit_size, alloc_size;
2237 	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
2238 	int last_allocs, group, unit;
2239 	unsigned int cpu, tcpu;
2240 	struct pcpu_alloc_info *ai;
2241 	unsigned int *cpu_map;
2242 
2243 	/* this function may be called multiple times */
2244 	memset(group_map, 0, sizeof(group_map));
2245 	memset(group_cnt, 0, sizeof(group_cnt));
2246 
2247 	/* calculate size_sum and ensure dyn_size is enough for early alloc */
2248 	size_sum = PFN_ALIGN(static_size + reserved_size +
2249 			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2250 	dyn_size = size_sum - static_size - reserved_size;
2251 
2252 	/*
2253 	 * Determine min_unit_size, alloc_size and max_upa such that
2254 	 * alloc_size is a multiple of atom_size and is the smallest
2255 	 * size which can accommodate 4k-aligned segments which are
2256 	 * equal to or larger than min_unit_size.
2257 	 */
2258 	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2259 
2260 	/* determine the maximum # of units that can fit in an allocation */
2261 	alloc_size = roundup(min_unit_size, atom_size);
2262 	upa = alloc_size / min_unit_size;
2263 	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2264 		upa--;
2265 	max_upa = upa;
2266 
2267 	/* group cpus according to their proximity */
2268 	for_each_possible_cpu(cpu) {
2269 		group = 0;
2270 	next_group:
2271 		for_each_possible_cpu(tcpu) {
2272 			if (cpu == tcpu)
2273 				break;
2274 			if (group_map[tcpu] == group && cpu_distance_fn &&
2275 			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
2276 			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
2277 				group++;
2278 				nr_groups = max(nr_groups, group + 1);
2279 				goto next_group;
2280 			}
2281 		}
2282 		group_map[cpu] = group;
2283 		group_cnt[group]++;
2284 	}
2285 
2286 	/*
2287 	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2288 	 * Expand the unit_size until we use >= 75% of the units allocated.
2289 	 * This is related to atom_size, which can be much larger than unit_size.
2290 	 */
2291 	last_allocs = INT_MAX;
2292 	for (upa = max_upa; upa; upa--) {
2293 		int allocs = 0, wasted = 0;
2294 
2295 		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2296 			continue;
2297 
2298 		for (group = 0; group < nr_groups; group++) {
2299 			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2300 			allocs += this_allocs;
2301 			wasted += this_allocs * upa - group_cnt[group];
2302 		}
2303 
2304 		/*
2305 		 * Don't accept if wastage is over 1/3.  The
2306 		 * greater-than comparison ensures upa==1 always
2307 		 * passes the following check.
2308 		 */
2309 		if (wasted > num_possible_cpus() / 3)
2310 			continue;
2311 
2312 		/* and then don't consume more memory */
2313 		if (allocs > last_allocs)
2314 			break;
2315 		last_allocs = allocs;
2316 		best_upa = upa;
2317 	}
2318 	upa = best_upa;
2319 
2320 	/* allocate and fill alloc_info */
2321 	for (group = 0; group < nr_groups; group++)
2322 		nr_units += roundup(group_cnt[group], upa);
2323 
2324 	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2325 	if (!ai)
2326 		return ERR_PTR(-ENOMEM);
2327 	cpu_map = ai->groups[0].cpu_map;
2328 
2329 	for (group = 0; group < nr_groups; group++) {
2330 		ai->groups[group].cpu_map = cpu_map;
2331 		cpu_map += roundup(group_cnt[group], upa);
2332 	}
2333 
2334 	ai->static_size = static_size;
2335 	ai->reserved_size = reserved_size;
2336 	ai->dyn_size = dyn_size;
2337 	ai->unit_size = alloc_size / upa;
2338 	ai->atom_size = atom_size;
2339 	ai->alloc_size = alloc_size;
2340 
2341 	for (group = 0, unit = 0; group_cnt[group]; group++) {
2342 		struct pcpu_group_info *gi = &ai->groups[group];
2343 
2344 		/*
2345 		 * Initialize base_offset as if all groups are located
2346 		 * back-to-back.  The caller should update this to
2347 		 * reflect actual allocation.
2348 		 */
2349 		gi->base_offset = unit * ai->unit_size;
2350 
2351 		for_each_possible_cpu(cpu)
2352 			if (group_map[cpu] == group)
2353 				gi->cpu_map[gi->nr_units++] = cpu;
2354 		gi->nr_units = roundup(gi->nr_units, upa);
2355 		unit += gi->nr_units;
2356 	}
2357 	BUG_ON(unit != nr_units);
2358 
2359 	return ai;
2360 }
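/*
 * Worked example for the sizing logic above (numbers are illustrative):
 * with size_sum = 192K, PCPU_MIN_UNIT_SIZE <= 192K and atom_size = 2M,
 * min_unit_size is 192K and alloc_size rounds up to 2M.  The upa scan
 * starts at 2M / 192K = 10; 10 and 9 are rejected because 2M is not
 * divisible by them, and the scan settles on max_upa = 8, giving a
 * page-aligned unit size of 256K >= min_unit_size.
 */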
2361 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
2362 
2363 #if defined(BUILD_EMBED_FIRST_CHUNK)
2364 /**
2365  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
2366  * @reserved_size: the size of reserved percpu area in bytes
2367  * @dyn_size: minimum free size for dynamic allocation in bytes
2368  * @atom_size: allocation atom size
2369  * @cpu_distance_fn: callback to determine distance between cpus, optional
2370  * @alloc_fn: function to allocate percpu page
2371  * @free_fn: function to free percpu page
2372  *
2373  * This is a helper to ease setting up the embedded first percpu chunk and
2374  * can be called where pcpu_setup_first_chunk() is expected.
2375  *
2376  * If this function is used to setup the first chunk, it is allocated
2377  * by calling @alloc_fn and used as-is without being mapped into
2378  * vmalloc area.  Allocations are always whole multiples of @atom_size
2379  * aligned to @atom_size.
2380  *
2381  * This enables the first chunk to piggyback on the linear physical
2382  * mapping, which often uses a larger page size.  Please note that this
2383  * can result in a very sparse cpu->unit mapping on NUMA machines, thus
2384  * requiring a large vmalloc address space.  Don't use this allocator if
2385  * vmalloc space is not orders of magnitude larger than the distances
2386  * between node memory addresses (i.e. 32-bit NUMA machines).
2387  *
2388  * @dyn_size specifies the minimum dynamic area size.
2389  *
2390  * If the needed size is smaller than the minimum or specified unit
2391  * size, the leftover is returned using @free_fn.
2392  *
2393  * RETURNS:
2394  * 0 on success, -errno on failure.
2395  */
2396 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2397 				  size_t atom_size,
2398 				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
2399 				  pcpu_fc_alloc_fn_t alloc_fn,
2400 				  pcpu_fc_free_fn_t free_fn)
2401 {
2402 	void *base = (void *)ULONG_MAX;
2403 	void **areas = NULL;
2404 	struct pcpu_alloc_info *ai;
2405 	size_t size_sum, areas_size;
2406 	unsigned long max_distance;
2407 	int group, i, highest_group, rc;
2408 
2409 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
2410 				   cpu_distance_fn);
2411 	if (IS_ERR(ai))
2412 		return PTR_ERR(ai);
2413 
2414 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2415 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
2416 
2417 	areas = memblock_virt_alloc_nopanic(areas_size, 0);
2418 	if (!areas) {
2419 		rc = -ENOMEM;
2420 		goto out_free;
2421 	}
2422 
2423 	/* allocate, copy and determine base address & max_distance */
2424 	highest_group = 0;
2425 	for (group = 0; group < ai->nr_groups; group++) {
2426 		struct pcpu_group_info *gi = &ai->groups[group];
2427 		unsigned int cpu = NR_CPUS;
2428 		void *ptr;
2429 
2430 		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2431 			cpu = gi->cpu_map[i];
2432 		BUG_ON(cpu == NR_CPUS);
2433 
2434 		/* allocate space for the whole group */
2435 		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2436 		if (!ptr) {
2437 			rc = -ENOMEM;
2438 			goto out_free_areas;
2439 		}
2440 		/* kmemleak tracks the percpu allocations separately */
2441 		kmemleak_free(ptr);
2442 		areas[group] = ptr;
2443 
2444 		base = min(ptr, base);
2445 		if (ptr > areas[highest_group])
2446 			highest_group = group;
2447 	}
2448 	max_distance = areas[highest_group] - base;
2449 	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
2450 
2451 	/* warn if maximum distance is further than 75% of vmalloc space */
2452 	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2453 		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
2454 				max_distance, VMALLOC_TOTAL);
2455 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2456 		/* and fail if we have fallback */
2457 		rc = -EINVAL;
2458 		goto out_free_areas;
2459 #endif
2460 	}
2461 
2462 	/*
2463 	 * Copy data and free unused parts.  This should happen after all
2464 	 * allocations are complete; otherwise, we may end up with
2465 	 * overlapping groups.
2466 	 */
2467 	for (group = 0; group < ai->nr_groups; group++) {
2468 		struct pcpu_group_info *gi = &ai->groups[group];
2469 		void *ptr = areas[group];
2470 
2471 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2472 			if (gi->cpu_map[i] == NR_CPUS) {
2473 				/* unused unit, free whole */
2474 				free_fn(ptr, ai->unit_size);
2475 				continue;
2476 			}
2477 			/* copy and return the unused part */
2478 			memcpy(ptr, __per_cpu_load, ai->static_size);
2479 			free_fn(ptr + size_sum, ai->unit_size - size_sum);
2480 		}
2481 	}
2482 
2483 	/* base address is now known, determine group base offsets */
2484 	for (group = 0; group < ai->nr_groups; group++) {
2485 		ai->groups[group].base_offset = areas[group] - base;
2486 	}
2487 
2488 	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2489 		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2490 		ai->dyn_size, ai->unit_size);
2491 
2492 	rc = pcpu_setup_first_chunk(ai, base);
2493 	goto out_free;
2494 
2495 out_free_areas:
2496 	for (group = 0; group < ai->nr_groups; group++)
2497 		if (areas[group])
2498 			free_fn(areas[group],
2499 				ai->groups[group].nr_units * ai->unit_size);
2500 out_free:
2501 	pcpu_free_alloc_info(ai);
2502 	if (areas)
2503 		memblock_free_early(__pa(areas), areas_size);
2504 	return rc;
2505 }
2506 #endif /* BUILD_EMBED_FIRST_CHUNK */
2507 
2508 #ifdef BUILD_PAGE_FIRST_CHUNK
2509 /**
2510  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2511  * @reserved_size: the size of reserved percpu area in bytes
2512  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2513  * @free_fn: function to free percpu page, always called with PAGE_SIZE
2514  * @populate_pte_fn: function to populate pte
2515  *
2516  * This is a helper to ease setting up a page-remapped first percpu
2517  * chunk and can be called where pcpu_setup_first_chunk() is expected.
2518  *
2519  * This is the basic allocator.  The static percpu area is allocated
2520  * page-by-page into the vmalloc area.
2521  *
2522  * RETURNS:
2523  * 0 on success, -errno on failure.
2524  */
2525 int __init pcpu_page_first_chunk(size_t reserved_size,
2526 				 pcpu_fc_alloc_fn_t alloc_fn,
2527 				 pcpu_fc_free_fn_t free_fn,
2528 				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2529 {
2530 	static struct vm_struct vm;
2531 	struct pcpu_alloc_info *ai;
2532 	char psize_str[16];
2533 	int unit_pages;
2534 	size_t pages_size;
2535 	struct page **pages;
2536 	int unit, i, j, rc;
2537 	int upa;
2538 	int nr_g0_units;
2539 
2540 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2541 
2542 	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2543 	if (IS_ERR(ai))
2544 		return PTR_ERR(ai);
2545 	BUG_ON(ai->nr_groups != 1);
2546 	upa = ai->alloc_size / ai->unit_size;
2547 	nr_g0_units = roundup(num_possible_cpus(), upa);
2548 	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
2549 		pcpu_free_alloc_info(ai);
2550 		return -EINVAL;
2551 	}
2552 
2553 	unit_pages = ai->unit_size >> PAGE_SHIFT;
2554 
2555 	/* unaligned allocations can't be freed, round up to page size */
2556 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2557 			       sizeof(pages[0]));
2558 	pages = memblock_virt_alloc(pages_size, 0);
2559 
2560 	/* allocate pages */
2561 	j = 0;
2562 	for (unit = 0; unit < num_possible_cpus(); unit++) {
2563 		unsigned int cpu = ai->groups[0].cpu_map[unit];
2564 		for (i = 0; i < unit_pages; i++) {
2565 			void *ptr;
2566 
2567 			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2568 			if (!ptr) {
2569 				pr_warn("failed to allocate %s page for cpu%u\n",
2570 						psize_str, cpu);
2571 				goto enomem;
2572 			}
2573 			/* kmemleak tracks the percpu allocations separately */
2574 			kmemleak_free(ptr);
2575 			pages[j++] = virt_to_page(ptr);
2576 		}
2577 	}
2578 
2579 	/* allocate vm area, map the pages and copy static data */
2580 	vm.flags = VM_ALLOC;
2581 	vm.size = num_possible_cpus() * ai->unit_size;
2582 	vm_area_register_early(&vm, PAGE_SIZE);
2583 
2584 	for (unit = 0; unit < num_possible_cpus(); unit++) {
2585 		unsigned long unit_addr =
2586 			(unsigned long)vm.addr + unit * ai->unit_size;
2587 
2588 		for (i = 0; i < unit_pages; i++)
2589 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2590 
2591 		/* pte already populated, the following shouldn't fail */
2592 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2593 				      unit_pages);
2594 		if (rc < 0)
2595 			panic("failed to map percpu area, err=%d\n", rc);
2596 
2597 		/*
2598 		 * FIXME: Archs with virtual cache should flush local
2599 		 * cache for the linear mapping here - something
2600 		 * equivalent to flush_cache_vmap() on the local cpu.
2601 		 * flush_cache_vmap() can't be used as most supporting
2602 		 * data structures are not set up yet.
2603 		 */
2604 
2605 		/* copy static data */
2606 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
2607 	}
2608 
2609 	/* we're ready, commit */
2610 	pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
2611 		unit_pages, psize_str, vm.addr, ai->static_size,
2612 		ai->reserved_size, ai->dyn_size);
2613 
2614 	rc = pcpu_setup_first_chunk(ai, vm.addr);
2615 	goto out_free_ar;
2616 
2617 enomem:
2618 	while (--j >= 0)
2619 		free_fn(page_address(pages[j]), PAGE_SIZE);
2620 	rc = -ENOMEM;
2621 out_free_ar:
2622 	memblock_free_early(__pa(pages), pages_size);
2623 	pcpu_free_alloc_info(ai);
2624 	return rc;
2625 }
2626 #endif /* BUILD_PAGE_FIRST_CHUNK */
2627 
2628 #ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
2629 /*
2630  * Generic SMP percpu area setup.
2631  *
2632  * The embedding helper is used because its behavior closely resembles
2633  * the original non-dynamic generic percpu area setup.  This is
2634  * important because many archs have addressing restrictions and might
2635  * fail if the percpu area is located far away from the previous
2636  * location.  As an added bonus, in non-NUMA cases, embedding is
2637  * generally a good idea TLB-wise because the percpu area can piggyback
2638  * on the physical linear memory mapping, which uses large page
2639  * mappings on applicable archs.
2640  */
2641 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2642 EXPORT_SYMBOL(__per_cpu_offset);
2643 
2644 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2645 				       size_t align)
2646 {
2647 	return memblock_virt_alloc_from_nopanic(
2648 			size, align, __pa(MAX_DMA_ADDRESS));
2649 }
2650 
2651 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2652 {
2653 	memblock_free_early(__pa(ptr), size);
2654 }
2655 
2656 void __init setup_per_cpu_areas(void)
2657 {
2658 	unsigned long delta;
2659 	unsigned int cpu;
2660 	int rc;
2661 
2662 	/*
2663 	 * Always reserve area for module percpu variables.  That's
2664 	 * what the legacy allocator did.
2665 	 */
2666 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2667 				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2668 				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2669 	if (rc < 0)
2670 		panic("Failed to initialize percpu areas.");
2671 
2672 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2673 	for_each_possible_cpu(cpu)
2674 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2675 }
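/*
 * Sketch only (the real accessors go through SHIFT_PERCPU_PTR and may
 * use a segment register or similar on some archs): after the loop
 * above, a static percpu access such as per_cpu(var, cpu) conceptually
 * resolves to
 *
 *	*(typeof(var) *)((unsigned long)&var + __per_cpu_offset[cpu]);
 *
 * i.e. the linker-section address of the variable plus the per-cpu
 * offset computed here.
 */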
2676 #endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2677 
2678 #else	/* CONFIG_SMP */
2679 
2680 /*
2681  * UP percpu area setup.
2682  *
2683  * UP always uses the km-based percpu allocator with identity mapping.
2684  * Static percpu variables are indistinguishable from the usual static
2685  * variables and don't require any special preparation.
2686  */
2687 void __init setup_per_cpu_areas(void)
2688 {
2689 	const size_t unit_size =
2690 		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2691 					 PERCPU_DYNAMIC_RESERVE));
2692 	struct pcpu_alloc_info *ai;
2693 	void *fc;
2694 
2695 	ai = pcpu_alloc_alloc_info(1, 1);
2696 	fc = memblock_virt_alloc_from_nopanic(unit_size,
2697 					      PAGE_SIZE,
2698 					      __pa(MAX_DMA_ADDRESS));
2699 	if (!ai || !fc)
2700 		panic("Failed to allocate memory for percpu areas.");
2701 	/* kmemleak tracks the percpu allocations separately */
2702 	kmemleak_free(fc);
2703 
2704 	ai->dyn_size = unit_size;
2705 	ai->unit_size = unit_size;
2706 	ai->atom_size = unit_size;
2707 	ai->alloc_size = unit_size;
2708 	ai->groups[0].nr_units = 1;
2709 	ai->groups[0].cpu_map[0] = 0;
2710 
2711 	if (pcpu_setup_first_chunk(ai, fc) < 0)
2712 		panic("Failed to initialize percpu areas.");
2713 }
2714 
2715 #endif	/* CONFIG_SMP */
2716 
2717 /*
2718  * The percpu allocator is initialized early during boot, when neither
2719  * slab nor workqueue is available.  Plug async management until
2720  * everything is up and running.
2721  */
2722 static int __init percpu_enable_async(void)
2723 {
2724 	pcpu_async_enabled = true;
2725 	return 0;
2726 }
2727 subsys_initcall(percpu_enable_async);
2728