xref: /openbmc/linux/mm/percpu.c (revision 239480ab)
1 /*
2  * mm/percpu.c - percpu memory allocator
3  *
4  * Copyright (C) 2009		SUSE Linux Products GmbH
5  * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
6  *
7  * This file is released under the GPLv2.
8  *
9  * This is the percpu allocator, which can handle both static and
10  * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
11  * consists of a boot-time determined number of units and the first
12  * chunk is used for static percpu variables in the kernel image
13  * (special boot time alloc/init handling is necessary as these areas
14  * need to be brought up before allocation services are running).
15  * Units grow as necessary and all units grow or shrink in unison.
16  * When a chunk is filled up, another chunk is allocated.
17  *
18  *  c0                           c1                         c2
19  *  -------------------          -------------------        ------------
20  * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
21  *  -------------------  ......  -------------------  ....  ------------
22  *
23  * Allocation is done in offset-size areas of single unit space.  I.e.,
24  * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
25  * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
26  * cpus.  On NUMA, the mapping can be non-linear and even sparse.
27  * Percpu access can be done by configuring percpu base registers
28  * according to cpu to unit mapping and pcpu_unit_size.
29  *
30  * There are usually many small percpu allocations, many of them as
31  * small as 4 bytes.  The allocator organizes chunks into lists
32  * according to free size and tries to allocate from the fullest one.
33  * Each chunk keeps the maximum contiguous area size hint which is
34  * guaranteed to be equal to or larger than the maximum contiguous
35  * area in the chunk.  This helps the allocator not to iterate the
36  * chunk maps unnecessarily.
37  *
38  * Allocation state in each chunk is kept using an array of integers on
39  * chunk->map.  Each entry holds the byte offset of the area it describes,
40  * with its low bit set while that area is allocated (see the sketch below).
41  * Allocation inside a chunk is done by scanning this map sequentially and
42  * serving the first matching entry.  This is mostly copied from the
43  * percpu_modalloc() allocator.  Chunks can be determined from the address
44  * using the index field in the page struct, which points to the chunk.
45  *
46  * To use this allocator, arch code should do the following:
47  *
48  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49  *   regular address to percpu pointer and back if they need to be
50  *   different from the default
51  *
52  * - use pcpu_setup_first_chunk() during percpu area initialization to
53  *   setup the first chunk containing the kernel static percpu area
54  */
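
/*
 * Illustrative sketch of the map encoding (hypothetical values, not
 * taken from a real chunk): with map_used == 3 and
 *
 *	map[] = { 0 | 1, 128, 640 | 1, pcpu_unit_size | 1 }
 *
 * the chunk contains a 128 byte allocated area at offset 0, a 512 byte
 * free area at offset 128 and an allocated area from offset 640 up to
 * the sentry entry at pcpu_unit_size.  The sentry always has its low
 * bit set and is not counted in map_used.
 */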
55 
56 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
57 
58 #include <linux/bitmap.h>
59 #include <linux/bootmem.h>
60 #include <linux/err.h>
61 #include <linux/list.h>
62 #include <linux/log2.h>
63 #include <linux/mm.h>
64 #include <linux/module.h>
65 #include <linux/mutex.h>
66 #include <linux/percpu.h>
67 #include <linux/pfn.h>
68 #include <linux/slab.h>
69 #include <linux/spinlock.h>
70 #include <linux/vmalloc.h>
71 #include <linux/workqueue.h>
72 #include <linux/kmemleak.h>
73 
74 #include <asm/cacheflush.h>
75 #include <asm/sections.h>
76 #include <asm/tlbflush.h>
77 #include <asm/io.h>
78 
79 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 share the same slot */
80 #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
81 #define PCPU_ATOMIC_MAP_MARGIN_LOW	32
82 #define PCPU_ATOMIC_MAP_MARGIN_HIGH	64
83 #define PCPU_EMPTY_POP_PAGES_LOW	2
84 #define PCPU_EMPTY_POP_PAGES_HIGH	4
85 
86 #ifdef CONFIG_SMP
87 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
88 #ifndef __addr_to_pcpu_ptr
89 #define __addr_to_pcpu_ptr(addr)					\
90 	(void __percpu *)((unsigned long)(addr) -			\
91 			  (unsigned long)pcpu_base_addr	+		\
92 			  (unsigned long)__per_cpu_start)
93 #endif
94 #ifndef __pcpu_ptr_to_addr
95 #define __pcpu_ptr_to_addr(ptr)						\
96 	(void __force *)((unsigned long)(ptr) +				\
97 			 (unsigned long)pcpu_base_addr -		\
98 			 (unsigned long)__per_cpu_start)
99 #endif
100 #else	/* CONFIG_SMP */
101 /* on UP, it's always identity mapped */
102 #define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
103 #define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
104 #endif	/* CONFIG_SMP */
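
/*
 * Translation sketch for the default SMP mapping above (hypothetical
 * addresses): with the first chunk mapped at pcpu_base_addr ==
 * 0xffffe000 and the static percpu section starting at
 * __per_cpu_start == 0xc1000000, the regular address 0xffffe140
 * converts to the percpu pointer 0xc1000140 and back.  Each CPU then
 * reaches its own copy by adding its per-cpu offset to that pointer.
 */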
105 
106 struct pcpu_chunk {
107 	struct list_head	list;		/* linked to pcpu_slot lists */
108 	int			free_size;	/* free bytes in the chunk */
109 	int			contig_hint;	/* max contiguous size hint */
110 	void			*base_addr;	/* base address of this chunk */
111 
112 	int			map_used;	/* # of map entries used before the sentry */
113 	int			map_alloc;	/* # of map entries allocated */
114 	int			*map;		/* allocation map */
115 	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
116 
117 	void			*data;		/* chunk data */
118 	int			first_free;	/* no free below this */
119 	bool			immutable;	/* no [de]population allowed */
120 	int			nr_populated;	/* # of populated pages */
121 	unsigned long		populated[];	/* populated bitmap */
122 };
123 
124 static int pcpu_unit_pages __read_mostly;
125 static int pcpu_unit_size __read_mostly;
126 static int pcpu_nr_units __read_mostly;
127 static int pcpu_atom_size __read_mostly;
128 static int pcpu_nr_slots __read_mostly;
129 static size_t pcpu_chunk_struct_size __read_mostly;
130 
131 /* cpus with the lowest and highest unit addresses */
132 static unsigned int pcpu_low_unit_cpu __read_mostly;
133 static unsigned int pcpu_high_unit_cpu __read_mostly;
134 
135 /* the address of the first chunk which starts with the kernel static area */
136 void *pcpu_base_addr __read_mostly;
137 EXPORT_SYMBOL_GPL(pcpu_base_addr);
138 
139 static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
140 const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
141 
142 /* group information, used for vm allocation */
143 static int pcpu_nr_groups __read_mostly;
144 static const unsigned long *pcpu_group_offsets __read_mostly;
145 static const size_t *pcpu_group_sizes __read_mostly;
146 
147 /*
148  * The first chunk which always exists.  Note that unlike other
149  * chunks, this one can be allocated and mapped in several different
150  * ways and thus often doesn't live in the vmalloc area.
151  */
152 static struct pcpu_chunk *pcpu_first_chunk;
153 
154 /*
155  * Optional reserved chunk.  This chunk reserves part of the first
156  * chunk and serves it for reserved allocations.  The offset up to which
157  * the area is reserved is in pcpu_reserved_chunk_limit.  When the reserved
158  * area doesn't exist, the following variables contain NULL and 0
159  * respectively.
160  */
161 static struct pcpu_chunk *pcpu_reserved_chunk;
162 static int pcpu_reserved_chunk_limit;
163 
164 static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
165 static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
166 
167 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
168 
169 /* chunks which need their map areas extended, protected by pcpu_lock */
170 static LIST_HEAD(pcpu_map_extend_chunks);
171 
172 /*
173  * The number of empty populated pages, protected by pcpu_lock.  The
174  * reserved chunk doesn't contribute to the count.
175  */
176 static int pcpu_nr_empty_pop_pages;
177 
178 /*
179  * Balance work is used to populate or destroy chunks asynchronously.  We
180  * try to keep the number of populated free pages between
181  * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
182  * empty chunk.
183  */
184 static void pcpu_balance_workfn(struct work_struct *work);
185 static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
186 static bool pcpu_async_enabled __read_mostly;
187 static bool pcpu_atomic_alloc_failed;
188 
189 static void pcpu_schedule_balance_work(void)
190 {
191 	if (pcpu_async_enabled)
192 		schedule_work(&pcpu_balance_work);
193 }
194 
195 static bool pcpu_addr_in_first_chunk(void *addr)
196 {
197 	void *first_start = pcpu_first_chunk->base_addr;
198 
199 	return addr >= first_start && addr < first_start + pcpu_unit_size;
200 }
201 
202 static bool pcpu_addr_in_reserved_chunk(void *addr)
203 {
204 	void *first_start = pcpu_first_chunk->base_addr;
205 
206 	return addr >= first_start &&
207 		addr < first_start + pcpu_reserved_chunk_limit;
208 }
209 
210 static int __pcpu_size_to_slot(int size)
211 {
212 	int highbit = fls(size);	/* size is in bytes */
213 	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
214 }
215 
216 static int pcpu_size_to_slot(int size)
217 {
218 	if (size == pcpu_unit_size)
219 		return pcpu_nr_slots - 1;
220 	return __pcpu_size_to_slot(size);
221 }
222 
223 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
224 {
225 	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
226 		return 0;
227 
228 	return pcpu_size_to_slot(chunk->free_size);
229 }
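
/*
 * For illustration (hypothetical numbers): a chunk whose free_size is
 * 1024 bytes has fls(1024) == 11 and therefore sits in slot
 * 11 - PCPU_SLOT_BASE_SHIFT + 2 == 8.  An allocation of 1024 bytes
 * starts searching from that same slot; the exact fit is still checked
 * against chunk->contig_hint in pcpu_alloc() below.
 */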
230 
231 /* set the pointer to a chunk in a page struct */
232 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
233 {
234 	page->index = (unsigned long)pcpu;
235 }
236 
237 /* obtain pointer to a chunk from a page struct */
238 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
239 {
240 	return (struct pcpu_chunk *)page->index;
241 }
242 
243 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
244 {
245 	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
246 }
247 
248 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
249 				     unsigned int cpu, int page_idx)
250 {
251 	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
252 		(page_idx << PAGE_SHIFT);
253 }
254 
255 static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
256 					   int *rs, int *re, int end)
257 {
258 	*rs = find_next_zero_bit(chunk->populated, end, *rs);
259 	*re = find_next_bit(chunk->populated, end, *rs + 1);
260 }
261 
262 static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
263 					 int *rs, int *re, int end)
264 {
265 	*rs = find_next_bit(chunk->populated, end, *rs);
266 	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
267 }
268 
269 /*
270  * (Un)populated page region iterators.  Iterate over (un)populated
271  * page regions between @start and @end in @chunk.  @rs and @re should
272  * be integer variables and will be set to the start and end page index
273  * of the current region (see the usage sketch after these macros).
274  */
275 #define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
276 	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
277 	     (rs) < (re);						    \
278 	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
279 
280 #define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
281 	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
282 	     (rs) < (re);						    \
283 	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
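
/*
 * Usage sketch (illustrative only, mirroring how pcpu_alloc() below
 * populates missing pages after reserving an area).  Pages [rs, re)
 * of @chunk are not yet populated inside the loop body:
 *
 *	int rs, re, ret;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 *		ret = pcpu_populate_chunk(chunk, rs, re);
 *		...
 *	}
 */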
284 
285 /**
286  * pcpu_mem_zalloc - allocate memory
287  * @size: bytes to allocate
288  *
289  * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
290  * kzalloc() is used; otherwise, vzalloc() is used.  The returned
291  * memory is always zeroed.
292  *
293  * CONTEXT:
294  * Does GFP_KERNEL allocation.
295  *
296  * RETURNS:
297  * Pointer to the allocated area on success, NULL on failure.
298  */
299 static void *pcpu_mem_zalloc(size_t size)
300 {
301 	if (WARN_ON_ONCE(!slab_is_available()))
302 		return NULL;
303 
304 	if (size <= PAGE_SIZE)
305 		return kzalloc(size, GFP_KERNEL);
306 	else
307 		return vzalloc(size);
308 }
309 
310 /**
311  * pcpu_mem_free - free memory
312  * @ptr: memory to free
313  *
314  * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
315  */
316 static void pcpu_mem_free(void *ptr)
317 {
318 	kvfree(ptr);
319 }
320 
321 /**
322  * pcpu_count_occupied_pages - count the number of pages an area occupies
323  * @chunk: chunk of interest
324  * @i: index of the area in question
325  *
326  * Count the number of pages the chunk's @i'th area occupies.  When the area's
327  * start and/or end address isn't aligned to page boundary, the straddled
328  * page is included in the count iff the rest of the page is free.
329  */
330 static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
331 {
332 	int off = chunk->map[i] & ~1;
333 	int end = chunk->map[i + 1] & ~1;
334 
335 	if (!PAGE_ALIGNED(off) && i > 0) {
336 		int prev = chunk->map[i - 1];
337 
338 		if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
339 			off = round_down(off, PAGE_SIZE);
340 	}
341 
342 	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
343 		int next = chunk->map[i + 1];
344 		int nend = chunk->map[i + 2] & ~1;
345 
346 		if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
347 			end = round_up(end, PAGE_SIZE);
348 	}
349 
350 	return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
351 }
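
/*
 * Counting example (hypothetical, 4k pages): an area spanning
 * [1000, 9000) fully covers only page 1 ([4096, 8192)), so one page is
 * counted.  If the area is preceded by a free area starting at 0 and
 * followed by a free area extending to at least 12288, @off is rounded
 * down to 0 and @end up to 12288 and three pages are counted instead.
 */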
352 
353 /**
354  * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
355  * @chunk: chunk of interest
356  * @oslot: the previous slot it was on
357  *
358  * This function is called after an allocation or free changed @chunk.
359  * The new slot according to the changed state is determined and @chunk
360  * is moved to that slot.  Note that the reserved chunk is never put on
361  * chunk slots.
362  *
363  * CONTEXT:
364  * pcpu_lock.
365  */
366 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
367 {
368 	int nslot = pcpu_chunk_slot(chunk);
369 
370 	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
371 		if (oslot < nslot)
372 			list_move(&chunk->list, &pcpu_slot[nslot]);
373 		else
374 			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
375 	}
376 }
377 
378 /**
379  * pcpu_need_to_extend - determine whether chunk area map needs to be extended
380  * @chunk: chunk of interest
381  * @is_atomic: the allocation context
382  *
383  * Determine whether area map of @chunk needs to be extended.  If
384  * @is_atomic, only the amount necessary for a new allocation is
385  * considered; however, async extension is scheduled if the remaining
386  * amount is low.  If !@is_atomic, it aims for more empty space.  Combined,
387  * this ensures that the map is likely to have enough available space to
388  * accommodate atomic allocations which can't extend maps directly.
389  *
390  * CONTEXT:
391  * pcpu_lock.
392  *
393  * RETURNS:
394  * New target map allocation length if extension is necessary, 0
395  * otherwise.
396  */
397 static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
398 {
399 	int margin, new_alloc;
400 
401 	lockdep_assert_held(&pcpu_lock);
402 
403 	if (is_atomic) {
404 		margin = 3;
405 
406 		if (chunk->map_alloc <
407 		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
408 			if (list_empty(&chunk->map_extend_list)) {
409 				list_add_tail(&chunk->map_extend_list,
410 					      &pcpu_map_extend_chunks);
411 				pcpu_schedule_balance_work();
412 			}
413 		}
414 	} else {
415 		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
416 	}
417 
418 	if (chunk->map_alloc >= chunk->map_used + margin)
419 		return 0;
420 
421 	new_alloc = PCPU_DFL_MAP_ALLOC;
422 	while (new_alloc < chunk->map_used + margin)
423 		new_alloc *= 2;
424 
425 	return new_alloc;
426 }
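
/*
 * Sizing example for the above (hypothetical numbers): a chunk with
 * map_used == 50 and map_alloc == 64, extended from a non-atomic
 * context, uses margin == PCPU_ATOMIC_MAP_MARGIN_HIGH == 64.  Since
 * 64 < 50 + 64, extension is needed and new_alloc doubles from
 * PCPU_DFL_MAP_ALLOC (16) through 32 and 64 to 128, which is returned.
 */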
427 
428 /**
429  * pcpu_extend_area_map - extend area map of a chunk
430  * @chunk: chunk of interest
431  * @new_alloc: new target allocation length of the area map
432  *
433  * Extend area map of @chunk to have @new_alloc entries.
434  *
435  * CONTEXT:
436  * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
437  *
438  * RETURNS:
439  * 0 on success, -errno on failure.
440  */
441 static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
442 {
443 	int *old = NULL, *new = NULL;
444 	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
445 	unsigned long flags;
446 
447 	lockdep_assert_held(&pcpu_alloc_mutex);
448 
449 	new = pcpu_mem_zalloc(new_size);
450 	if (!new)
451 		return -ENOMEM;
452 
453 	/* acquire pcpu_lock and switch to new area map */
454 	spin_lock_irqsave(&pcpu_lock, flags);
455 
456 	if (new_alloc <= chunk->map_alloc)
457 		goto out_unlock;
458 
459 	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
460 	old = chunk->map;
461 
462 	memcpy(new, old, old_size);
463 
464 	chunk->map_alloc = new_alloc;
465 	chunk->map = new;
466 	new = NULL;
467 
468 out_unlock:
469 	spin_unlock_irqrestore(&pcpu_lock, flags);
470 
471 	/*
472 	 * pcpu_mem_free() might end up calling vfree() which uses
473 	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
474 	 */
475 	pcpu_mem_free(old);
476 	pcpu_mem_free(new);
477 
478 	return 0;
479 }
480 
481 /**
482  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
483  * @chunk: chunk the candidate area belongs to
484  * @off: the offset to the start of the candidate area
485  * @this_size: the size of the candidate area
486  * @size: the size of the target allocation
487  * @align: the alignment of the target allocation
488  * @pop_only: only allocate from already populated region
489  *
490  * We're trying to allocate @size bytes aligned at @align.  @chunk's area
491  * at @off sized @this_size is a candidate.  This function determines
492  * whether the target allocation fits in the candidate area and returns the
493  * number of bytes to pad after @off.  If the target area doesn't fit, -1
494  * is returned.
495  *
496  * If @pop_only is %true, this function only considers the already
497  * populated part of the candidate area.
498  */
499 static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
500 			    int size, int align, bool pop_only)
501 {
502 	int cand_off = off;
503 
504 	while (true) {
505 		int head = ALIGN(cand_off, align) - off;
506 		int page_start, page_end, rs, re;
507 
508 		if (this_size < head + size)
509 			return -1;
510 
511 		if (!pop_only)
512 			return head;
513 
514 		/*
515 		 * If the first unpopulated page is beyond the end of the
516 		 * allocation, the whole allocation is populated;
517 		 * otherwise, retry from the end of the unpopulated area.
518 		 */
519 		page_start = PFN_DOWN(head + off);
520 		page_end = PFN_UP(head + off + size);
521 
522 		rs = page_start;
523 		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
524 		if (rs >= page_end)
525 			return head;
526 		cand_off = re * PAGE_SIZE;
527 	}
528 }
529 
530 /**
531  * pcpu_alloc_area - allocate area from a pcpu_chunk
532  * @chunk: chunk of interest
533  * @size: wanted size in bytes
534  * @align: wanted align
535  * @pop_only: allocate only from the populated area
536  * @occ_pages_p: out param for the number of pages the area occupies
537  *
538  * Try to allocate @size bytes area aligned at @align from @chunk.
539  * Note that this function only allocates the offset.  It doesn't
540  * populate or map the area.
541  *
542  * @chunk->map must have at least two free slots.
543  *
544  * CONTEXT:
545  * pcpu_lock.
546  *
547  * RETURNS:
548  * Allocated offset in @chunk on success, -1 if no matching area is
549  * found.
550  */
551 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
552 			   bool pop_only, int *occ_pages_p)
553 {
554 	int oslot = pcpu_chunk_slot(chunk);
555 	int max_contig = 0;
556 	int i, off;
557 	bool seen_free = false;
558 	int *p;
559 
560 	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
561 		int head, tail;
562 		int this_size;
563 
564 		off = *p;
565 		if (off & 1)
566 			continue;
567 
568 		this_size = (p[1] & ~1) - off;
569 
570 		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
571 					pop_only);
572 		if (head < 0) {
573 			if (!seen_free) {
574 				chunk->first_free = i;
575 				seen_free = true;
576 			}
577 			max_contig = max(this_size, max_contig);
578 			continue;
579 		}
580 
581 		/*
582 		 * If head is small or the previous block is free,
583 		 * merge'em.  Note that 'small' is defined as smaller
584 		 * than sizeof(int), which is very small but isn't too
585 		 * uncommon for percpu allocations.
586 		 */
587 		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
588 			*p = off += head;
589 			if (p[-1] & 1)
590 				chunk->free_size -= head;
591 			else
592 				max_contig = max(*p - p[-1], max_contig);
593 			this_size -= head;
594 			head = 0;
595 		}
596 
597 		/* if tail is small, just keep it around */
598 		tail = this_size - head - size;
599 		if (tail < sizeof(int)) {
600 			tail = 0;
601 			size = this_size - head;
602 		}
603 
604 		/* split if warranted */
605 		if (head || tail) {
606 			int nr_extra = !!head + !!tail;
607 
608 			/* insert new subblocks */
609 			memmove(p + nr_extra + 1, p + 1,
610 				sizeof(chunk->map[0]) * (chunk->map_used - i));
611 			chunk->map_used += nr_extra;
612 
613 			if (head) {
614 				if (!seen_free) {
615 					chunk->first_free = i;
616 					seen_free = true;
617 				}
618 				*++p = off += head;
619 				++i;
620 				max_contig = max(head, max_contig);
621 			}
622 			if (tail) {
623 				p[1] = off + size;
624 				max_contig = max(tail, max_contig);
625 			}
626 		}
627 
628 		if (!seen_free)
629 			chunk->first_free = i + 1;
630 
631 		/* update hint and mark allocated */
632 		if (i + 1 == chunk->map_used)
633 			chunk->contig_hint = max_contig; /* fully scanned */
634 		else
635 			chunk->contig_hint = max(chunk->contig_hint,
636 						 max_contig);
637 
638 		chunk->free_size -= size;
639 		*p |= 1;
640 
641 		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
642 		pcpu_chunk_relocate(chunk, oslot);
643 		return off;
644 	}
645 
646 	chunk->contig_hint = max_contig;	/* fully scanned */
647 	pcpu_chunk_relocate(chunk, oslot);
648 
649 	/* tell the upper layer that this chunk has no matching area */
650 	return -1;
651 }
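
/*
 * Splitting example for pcpu_alloc_area() (hypothetical numbers):
 * asking for size == 64 and align == 64 from a free area at offset 100
 * sized 400 bytes gives head == ALIGN(100, 64) - 100 == 28 and
 * tail == 400 - 28 - 64 == 308.  Assuming the area preceding offset
 * 100 is allocated, both head and tail are kept (>= sizeof(int)) and
 * the area is split into a 28 byte free block at 100, the returned
 * allocation at offset 128 (its map entry gets the low bit set) and a
 * 308 byte free block at 192.
 */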
652 
653 /**
654  * pcpu_free_area - free area to a pcpu_chunk
655  * @chunk: chunk of interest
656  * @freeme: offset of area to free
657  * @occ_pages_p: out param for the number of pages the area occupies
658  *
659  * Free the area starting at @freeme back to @chunk.  Note that this function
660  * only modifies the allocation map.  It doesn't depopulate or unmap
661  * the area.
662  *
663  * CONTEXT:
664  * pcpu_lock.
665  */
666 static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
667 			   int *occ_pages_p)
668 {
669 	int oslot = pcpu_chunk_slot(chunk);
670 	int off = 0;
671 	unsigned i, j;
672 	int to_free = 0;
673 	int *p;
674 
675 	freeme |= 1;	/* we are searching for <given offset, in use> pair */
676 
677 	i = 0;
678 	j = chunk->map_used;
679 	while (i != j) {
680 		unsigned k = (i + j) / 2;
681 		off = chunk->map[k];
682 		if (off < freeme)
683 			i = k + 1;
684 		else if (off > freeme)
685 			j = k;
686 		else
687 			i = j = k;
688 	}
689 	BUG_ON(off != freeme);
690 
691 	if (i < chunk->first_free)
692 		chunk->first_free = i;
693 
694 	p = chunk->map + i;
695 	*p = off &= ~1;
696 	chunk->free_size += (p[1] & ~1) - off;
697 
698 	*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
699 
700 	/* merge with next? */
701 	if (!(p[1] & 1))
702 		to_free++;
703 	/* merge with previous? */
704 	if (i > 0 && !(p[-1] & 1)) {
705 		to_free++;
706 		i--;
707 		p--;
708 	}
709 	if (to_free) {
710 		chunk->map_used -= to_free;
711 		memmove(p + 1, p + 1 + to_free,
712 			(chunk->map_used - i) * sizeof(chunk->map[0]));
713 	}
714 
715 	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
716 	pcpu_chunk_relocate(chunk, oslot);
717 }
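
/*
 * Freeing sketch (continuing the hypothetical split above): freeing
 * the allocation at offset 128 finds its <offset, in use> map entry by
 * binary search and clears the low bit.  Because the neighbouring
 * areas at 100 and 192 are both free, two map entries are dropped and
 * a single free area starting at offset 100 remains.
 */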
718 
719 static struct pcpu_chunk *pcpu_alloc_chunk(void)
720 {
721 	struct pcpu_chunk *chunk;
722 
723 	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
724 	if (!chunk)
725 		return NULL;
726 
727 	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
728 						sizeof(chunk->map[0]));
729 	if (!chunk->map) {
730 		pcpu_mem_free(chunk);
731 		return NULL;
732 	}
733 
734 	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
735 	chunk->map[0] = 0;
736 	chunk->map[1] = pcpu_unit_size | 1;
737 	chunk->map_used = 1;
738 
739 	INIT_LIST_HEAD(&chunk->list);
740 	INIT_LIST_HEAD(&chunk->map_extend_list);
741 	chunk->free_size = pcpu_unit_size;
742 	chunk->contig_hint = pcpu_unit_size;
743 
744 	return chunk;
745 }
746 
747 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
748 {
749 	if (!chunk)
750 		return;
751 	pcpu_mem_free(chunk->map);
752 	pcpu_mem_free(chunk);
753 }
754 
755 /**
756  * pcpu_chunk_populated - post-population bookkeeping
757  * @chunk: pcpu_chunk which got populated
758  * @page_start: the start page
759  * @page_end: the end page
760  *
761  * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
762  * the bookkeeping information accordingly.  Must be called after each
763  * successful population.
764  */
765 static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
766 				 int page_start, int page_end)
767 {
768 	int nr = page_end - page_start;
769 
770 	lockdep_assert_held(&pcpu_lock);
771 
772 	bitmap_set(chunk->populated, page_start, nr);
773 	chunk->nr_populated += nr;
774 	pcpu_nr_empty_pop_pages += nr;
775 }
776 
777 /**
778  * pcpu_chunk_depopulated - post-depopulation bookkeeping
779  * @chunk: pcpu_chunk which got depopulated
780  * @page_start: the start page
781  * @page_end: the end page
782  *
783  * Pages in [@page_start,@page_end) have been depopulated from @chunk.
784  * Update the bookkeeping information accordingly.  Must be called after
785  * each successful depopulation.
786  */
787 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
788 				   int page_start, int page_end)
789 {
790 	int nr = page_end - page_start;
791 
792 	lockdep_assert_held(&pcpu_lock);
793 
794 	bitmap_clear(chunk->populated, page_start, nr);
795 	chunk->nr_populated -= nr;
796 	pcpu_nr_empty_pop_pages -= nr;
797 }
798 
799 /*
800  * Chunk management implementation.
801  *
802  * To allow different implementations, chunk alloc/free and
803  * [de]population are implemented in a separate file which is pulled
804  * into this file and compiled together.  The following functions
805  * should be implemented.
806  *
807  * pcpu_populate_chunk		- populate the specified range of a chunk
808  * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
809  * pcpu_create_chunk		- create a new chunk
810  * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
811  * pcpu_addr_to_page		- translate address to the corresponding struct page
812  * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
813  */
814 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
815 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
816 static struct pcpu_chunk *pcpu_create_chunk(void);
817 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
818 static struct page *pcpu_addr_to_page(void *addr);
819 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
820 
821 #ifdef CONFIG_NEED_PER_CPU_KM
822 #include "percpu-km.c"
823 #else
824 #include "percpu-vm.c"
825 #endif
826 
827 /**
828  * pcpu_chunk_addr_search - determine chunk containing specified address
829  * @addr: address for which the chunk needs to be determined.
830  *
831  * RETURNS:
832  * The address of the found chunk.
833  */
834 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
835 {
836 	/* is it in the first chunk? */
837 	if (pcpu_addr_in_first_chunk(addr)) {
838 		/* is it in the reserved area? */
839 		if (pcpu_addr_in_reserved_chunk(addr))
840 			return pcpu_reserved_chunk;
841 		return pcpu_first_chunk;
842 	}
843 
844 	/*
845 	 * The address is relative to unit0 which might be unused and
846 	 * thus unmapped.  Offset the address to the unit space of the
847 	 * current processor before looking it up in the vmalloc
848 	 * space.  Note that any possible cpu id can be used here, so
849 	 * there's no need to worry about preemption or cpu hotplug.
850 	 */
851 	addr += pcpu_unit_offsets[raw_smp_processor_id()];
852 	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
853 }
854 
855 /**
856  * pcpu_alloc - the percpu allocator
857  * @size: size of area to allocate in bytes
858  * @align: alignment of area (max PAGE_SIZE)
859  * @reserved: allocate from the reserved chunk if available
860  * @gfp: allocation flags
861  *
862  * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
863  * contain %GFP_KERNEL, the allocation is atomic.
864  *
865  * RETURNS:
866  * Percpu pointer to the allocated area on success, NULL on failure.
867  */
868 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
869 				 gfp_t gfp)
870 {
871 	static int warn_limit = 10;
872 	struct pcpu_chunk *chunk;
873 	const char *err;
874 	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
875 	int occ_pages = 0;
876 	int slot, off, new_alloc, cpu, ret;
877 	unsigned long flags;
878 	void __percpu *ptr;
879 
880 	/*
881 	 * We want the lowest bit of offset available for in-use/free
882 	 * indicator, so force >= 2 byte alignment and make size even.
883 	 */
884 	if (unlikely(align < 2))
885 		align = 2;
886 
887 	size = ALIGN(size, 2);
888 
889 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
890 		     !is_power_of_2(align))) {
891 		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
892 		     size, align);
893 		return NULL;
894 	}
895 
896 	if (!is_atomic)
897 		mutex_lock(&pcpu_alloc_mutex);
898 
899 	spin_lock_irqsave(&pcpu_lock, flags);
900 
901 	/* serve reserved allocations from the reserved chunk if available */
902 	if (reserved && pcpu_reserved_chunk) {
903 		chunk = pcpu_reserved_chunk;
904 
905 		if (size > chunk->contig_hint) {
906 			err = "alloc from reserved chunk failed";
907 			goto fail_unlock;
908 		}
909 
910 		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
911 			spin_unlock_irqrestore(&pcpu_lock, flags);
912 			if (is_atomic ||
913 			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
914 				err = "failed to extend area map of reserved chunk";
915 				goto fail;
916 			}
917 			spin_lock_irqsave(&pcpu_lock, flags);
918 		}
919 
920 		off = pcpu_alloc_area(chunk, size, align, is_atomic,
921 				      &occ_pages);
922 		if (off >= 0)
923 			goto area_found;
924 
925 		err = "alloc from reserved chunk failed";
926 		goto fail_unlock;
927 	}
928 
929 restart:
930 	/* search through normal chunks */
931 	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
932 		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
933 			if (size > chunk->contig_hint)
934 				continue;
935 
936 			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
937 			if (new_alloc) {
938 				if (is_atomic)
939 					continue;
940 				spin_unlock_irqrestore(&pcpu_lock, flags);
941 				if (pcpu_extend_area_map(chunk,
942 							 new_alloc) < 0) {
943 					err = "failed to extend area map";
944 					goto fail;
945 				}
946 				spin_lock_irqsave(&pcpu_lock, flags);
947 				/*
948 				 * pcpu_lock has been dropped, need to
949 				 * restart pcpu_slot list walking.
950 				 */
951 				goto restart;
952 			}
953 
954 			off = pcpu_alloc_area(chunk, size, align, is_atomic,
955 					      &occ_pages);
956 			if (off >= 0)
957 				goto area_found;
958 		}
959 	}
960 
961 	spin_unlock_irqrestore(&pcpu_lock, flags);
962 
963 	/*
964 	 * No space left.  Create a new chunk.  We don't want multiple
965 	 * tasks to create chunks simultaneously.  Serialize and create iff
966 	 * there's still no empty chunk after grabbing the mutex.
967 	 */
968 	if (is_atomic)
969 		goto fail;
970 
971 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
972 		chunk = pcpu_create_chunk();
973 		if (!chunk) {
974 			err = "failed to allocate new chunk";
975 			goto fail;
976 		}
977 
978 		spin_lock_irqsave(&pcpu_lock, flags);
979 		pcpu_chunk_relocate(chunk, -1);
980 	} else {
981 		spin_lock_irqsave(&pcpu_lock, flags);
982 	}
983 
984 	goto restart;
985 
986 area_found:
987 	spin_unlock_irqrestore(&pcpu_lock, flags);
988 
989 	/* populate if not all pages are already there */
990 	if (!is_atomic) {
991 		int page_start, page_end, rs, re;
992 
993 		page_start = PFN_DOWN(off);
994 		page_end = PFN_UP(off + size);
995 
996 		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
997 			WARN_ON(chunk->immutable);
998 
999 			ret = pcpu_populate_chunk(chunk, rs, re);
1000 
1001 			spin_lock_irqsave(&pcpu_lock, flags);
1002 			if (ret) {
1003 				pcpu_free_area(chunk, off, &occ_pages);
1004 				err = "failed to populate";
1005 				goto fail_unlock;
1006 			}
1007 			pcpu_chunk_populated(chunk, rs, re);
1008 			spin_unlock_irqrestore(&pcpu_lock, flags);
1009 		}
1010 
1011 		mutex_unlock(&pcpu_alloc_mutex);
1012 	}
1013 
1014 	if (chunk != pcpu_reserved_chunk) {
1015 		spin_lock_irqsave(&pcpu_lock, flags);
1016 		pcpu_nr_empty_pop_pages -= occ_pages;
1017 		spin_unlock_irqrestore(&pcpu_lock, flags);
1018 	}
1019 
1020 	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1021 		pcpu_schedule_balance_work();
1022 
1023 	/* clear the areas and return address relative to base address */
1024 	for_each_possible_cpu(cpu)
1025 		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1026 
1027 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1028 	kmemleak_alloc_percpu(ptr, size, gfp);
1029 	return ptr;
1030 
1031 fail_unlock:
1032 	spin_unlock_irqrestore(&pcpu_lock, flags);
1033 fail:
1034 	if (!is_atomic && warn_limit) {
1035 		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1036 			size, align, is_atomic, err);
1037 		dump_stack();
1038 		if (!--warn_limit)
1039 			pr_info("limit reached, disable warning\n");
1040 	}
1041 	if (is_atomic) {
1042 		/* see the flag handling in pcpu_balance_workfn() */
1043 		pcpu_atomic_alloc_failed = true;
1044 		pcpu_schedule_balance_work();
1045 	} else {
1046 		mutex_unlock(&pcpu_alloc_mutex);
1047 	}
1048 	return NULL;
1049 }
1050 
1051 /**
1052  * __alloc_percpu_gfp - allocate dynamic percpu area
1053  * @size: size of area to allocate in bytes
1054  * @align: alignment of area (max PAGE_SIZE)
1055  * @gfp: allocation flags
1056  *
1057  * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
1058  * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1059  * be called from any context but is a lot more likely to fail.
1060  *
1061  * RETURNS:
1062  * Percpu pointer to the allocated area on success, NULL on failure.
1063  */
1064 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1065 {
1066 	return pcpu_alloc(size, align, false, gfp);
1067 }
1068 EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
1069 
1070 /**
1071  * __alloc_percpu - allocate dynamic percpu area
1072  * @size: size of area to allocate in bytes
1073  * @align: alignment of area (max PAGE_SIZE)
1074  *
1075  * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1076  */
1077 void __percpu *__alloc_percpu(size_t size, size_t align)
1078 {
1079 	return pcpu_alloc(size, align, false, GFP_KERNEL);
1080 }
1081 EXPORT_SYMBOL_GPL(__alloc_percpu);
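
/*
 * Caller-side sketch (illustrative, not part of this file): dynamic
 * percpu users normally go through the alloc_percpu()/alloc_percpu_gfp()
 * wrappers around the functions above, access per-CPU copies with
 * per_cpu_ptr() or this_cpu_*() and release the area with free_percpu().
 * The type below is hypothetical:
 *
 *	struct my_stats {
 *		u64 packets;
 *	};
 *	struct my_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct my_stats);	(zero-filled, may sleep)
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->packets);
 *	...
 *	free_percpu(stats);
 */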
1082 
1083 /**
1084  * __alloc_reserved_percpu - allocate reserved percpu area
1085  * @size: size of area to allocate in bytes
1086  * @align: alignment of area (max PAGE_SIZE)
1087  *
1088  * Allocate zero-filled percpu area of @size bytes aligned at @align
1089  * from reserved percpu area if arch has set it up; otherwise,
1090  * allocation is served from the same dynamic area.  Might sleep.
1091  * Might trigger writeouts.
1092  *
1093  * CONTEXT:
1094  * Does GFP_KERNEL allocation.
1095  *
1096  * RETURNS:
1097  * Percpu pointer to the allocated area on success, NULL on failure.
1098  */
1099 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1100 {
1101 	return pcpu_alloc(size, align, true, GFP_KERNEL);
1102 }
1103 
1104 /**
1105  * pcpu_balance_workfn - manage the amount of free chunks and populated pages
1106  * @work: unused
1107  *
1108  * Reclaim all fully free chunks except for the first one.
1109  */
1110 static void pcpu_balance_workfn(struct work_struct *work)
1111 {
1112 	LIST_HEAD(to_free);
1113 	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1114 	struct pcpu_chunk *chunk, *next;
1115 	int slot, nr_to_pop, ret;
1116 
1117 	/*
1118 	 * There's no reason to keep around multiple unused chunks and VM
1119 	 * areas can be scarce.  Destroy all free chunks except for one.
1120 	 */
1121 	mutex_lock(&pcpu_alloc_mutex);
1122 	spin_lock_irq(&pcpu_lock);
1123 
1124 	list_for_each_entry_safe(chunk, next, free_head, list) {
1125 		WARN_ON(chunk->immutable);
1126 
1127 		/* spare the first one */
1128 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1129 			continue;
1130 
1131 		list_del_init(&chunk->map_extend_list);
1132 		list_move(&chunk->list, &to_free);
1133 	}
1134 
1135 	spin_unlock_irq(&pcpu_lock);
1136 
1137 	list_for_each_entry_safe(chunk, next, &to_free, list) {
1138 		int rs, re;
1139 
1140 		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1141 			pcpu_depopulate_chunk(chunk, rs, re);
1142 			spin_lock_irq(&pcpu_lock);
1143 			pcpu_chunk_depopulated(chunk, rs, re);
1144 			spin_unlock_irq(&pcpu_lock);
1145 		}
1146 		pcpu_destroy_chunk(chunk);
1147 	}
1148 
1149 	/* service chunks which requested async area map extension */
1150 	do {
1151 		int new_alloc = 0;
1152 
1153 		spin_lock_irq(&pcpu_lock);
1154 
1155 		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
1156 					struct pcpu_chunk, map_extend_list);
1157 		if (chunk) {
1158 			list_del_init(&chunk->map_extend_list);
1159 			new_alloc = pcpu_need_to_extend(chunk, false);
1160 		}
1161 
1162 		spin_unlock_irq(&pcpu_lock);
1163 
1164 		if (new_alloc)
1165 			pcpu_extend_area_map(chunk, new_alloc);
1166 	} while (chunk);
1167 
1168 	/*
1169  * Ensure there is a certain number of free populated pages for
1170  * atomic allocs.  Fill up from the most packed so that atomic
1171  * allocs don't increase fragmentation.  If atomic allocation
1172  * failed previously, always populate the maximum amount.  This
1173  * should prevent atomic allocs larger than PAGE_SIZE from
1174  * failing indefinitely; however, large atomic allocs are not
1175 	 * something we support properly and can be highly unreliable and
1176 	 * inefficient.
1177 	 */
1178 retry_pop:
1179 	if (pcpu_atomic_alloc_failed) {
1180 		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1181 		/* best effort anyway, don't worry about synchronization */
1182 		pcpu_atomic_alloc_failed = false;
1183 	} else {
1184 		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
1185 				  pcpu_nr_empty_pop_pages,
1186 				  0, PCPU_EMPTY_POP_PAGES_HIGH);
1187 	}
1188 
1189 	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1190 		int nr_unpop = 0, rs, re;
1191 
1192 		if (!nr_to_pop)
1193 			break;
1194 
1195 		spin_lock_irq(&pcpu_lock);
1196 		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1197 			nr_unpop = pcpu_unit_pages - chunk->nr_populated;
1198 			if (nr_unpop)
1199 				break;
1200 		}
1201 		spin_unlock_irq(&pcpu_lock);
1202 
1203 		if (!nr_unpop)
1204 			continue;
1205 
1206 		/* @chunk can't go away while pcpu_alloc_mutex is held */
1207 		pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1208 			int nr = min(re - rs, nr_to_pop);
1209 
1210 			ret = pcpu_populate_chunk(chunk, rs, rs + nr);
1211 			if (!ret) {
1212 				nr_to_pop -= nr;
1213 				spin_lock_irq(&pcpu_lock);
1214 				pcpu_chunk_populated(chunk, rs, rs + nr);
1215 				spin_unlock_irq(&pcpu_lock);
1216 			} else {
1217 				nr_to_pop = 0;
1218 			}
1219 
1220 			if (!nr_to_pop)
1221 				break;
1222 		}
1223 	}
1224 
1225 	if (nr_to_pop) {
1226 		/* ran out of chunks to populate, create a new one and retry */
1227 		chunk = pcpu_create_chunk();
1228 		if (chunk) {
1229 			spin_lock_irq(&pcpu_lock);
1230 			pcpu_chunk_relocate(chunk, -1);
1231 			spin_unlock_irq(&pcpu_lock);
1232 			goto retry_pop;
1233 		}
1234 	}
1235 
1236 	mutex_unlock(&pcpu_alloc_mutex);
1237 }
1238 
1239 /**
1240  * free_percpu - free percpu area
1241  * @ptr: pointer to area to free
1242  *
1243  * Free percpu area @ptr.
1244  *
1245  * CONTEXT:
1246  * Can be called from atomic context.
1247  */
1248 void free_percpu(void __percpu *ptr)
1249 {
1250 	void *addr;
1251 	struct pcpu_chunk *chunk;
1252 	unsigned long flags;
1253 	int off, occ_pages;
1254 
1255 	if (!ptr)
1256 		return;
1257 
1258 	kmemleak_free_percpu(ptr);
1259 
1260 	addr = __pcpu_ptr_to_addr(ptr);
1261 
1262 	spin_lock_irqsave(&pcpu_lock, flags);
1263 
1264 	chunk = pcpu_chunk_addr_search(addr);
1265 	off = addr - chunk->base_addr;
1266 
1267 	pcpu_free_area(chunk, off, &occ_pages);
1268 
1269 	if (chunk != pcpu_reserved_chunk)
1270 		pcpu_nr_empty_pop_pages += occ_pages;
1271 
1272 	/* if there is more than one fully free chunk, wake up the grim reaper */
1273 	if (chunk->free_size == pcpu_unit_size) {
1274 		struct pcpu_chunk *pos;
1275 
1276 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1277 			if (pos != chunk) {
1278 				pcpu_schedule_balance_work();
1279 				break;
1280 			}
1281 	}
1282 
1283 	spin_unlock_irqrestore(&pcpu_lock, flags);
1284 }
1285 EXPORT_SYMBOL_GPL(free_percpu);
1286 
1287 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
1288 {
1289 #ifdef CONFIG_SMP
1290 	const size_t static_size = __per_cpu_end - __per_cpu_start;
1291 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1292 	unsigned int cpu;
1293 
1294 	for_each_possible_cpu(cpu) {
1295 		void *start = per_cpu_ptr(base, cpu);
1296 		void *va = (void *)addr;
1297 
1298 		if (va >= start && va < start + static_size) {
1299 			if (can_addr) {
1300 				*can_addr = (unsigned long) (va - start);
1301 				*can_addr += (unsigned long)
1302 					per_cpu_ptr(base, get_boot_cpu_id());
1303 			}
1304 			return true;
1305 		}
1306 	}
1307 #endif
1308 	/* on UP, can't distinguish from other static vars, always false */
1309 	return false;
1310 }
1311 
1312 /**
1313  * is_kernel_percpu_address - test whether address is from static percpu area
1314  * @addr: address to test
1315  *
1316  * Test whether @addr belongs to in-kernel static percpu area.  Module
1317  * static percpu areas are not considered.  For those, use
1318  * is_module_percpu_address().
1319  *
1320  * RETURNS:
1321  * %true if @addr is from in-kernel static percpu area, %false otherwise.
1322  */
1323 bool is_kernel_percpu_address(unsigned long addr)
1324 {
1325 	return __is_kernel_percpu_address(addr, NULL);
1326 }
1327 
1328 /**
1329  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1330  * @addr: the address to be converted to physical address
1331  *
1332  * Given @addr, which is a dereferenceable address obtained via one of
1333  * the percpu access macros, this function translates it into its
1334  * physical address.  The caller is responsible for ensuring @addr stays
1335  * valid until this function finishes.
1336  *
1337  * The percpu allocator has a special setup for the first chunk, which
1338  * currently supports either embedding in the linear address space or a
1339  * vmalloc mapping; from the second chunk on, the backing allocator
1340  * (currently either vm or km) provides the translation.
1341  *
1342  * The addr could be translated without checking whether it falls into
1343  * the first chunk, but the current code better reflects how the percpu
1344  * allocator actually works, and the verification can discover bugs both
1345  * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
1346  * So we keep the current code.
1347  *
1348  * RETURNS:
1349  * The physical address for @addr.
1350  */
1351 phys_addr_t per_cpu_ptr_to_phys(void *addr)
1352 {
1353 	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1354 	bool in_first_chunk = false;
1355 	unsigned long first_low, first_high;
1356 	unsigned int cpu;
1357 
1358 	/*
1359 	 * The following test on unit_low/high isn't strictly
1360 	 * necessary but will speed up lookups of addresses which
1361 	 * aren't in the first chunk.
1362 	 */
1363 	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1364 	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
1365 				     pcpu_unit_pages);
1366 	if ((unsigned long)addr >= first_low &&
1367 	    (unsigned long)addr < first_high) {
1368 		for_each_possible_cpu(cpu) {
1369 			void *start = per_cpu_ptr(base, cpu);
1370 
1371 			if (addr >= start && addr < start + pcpu_unit_size) {
1372 				in_first_chunk = true;
1373 				break;
1374 			}
1375 		}
1376 	}
1377 
1378 	if (in_first_chunk) {
1379 		if (!is_vmalloc_addr(addr))
1380 			return __pa(addr);
1381 		else
1382 			return page_to_phys(vmalloc_to_page(addr)) +
1383 			       offset_in_page(addr);
1384 	} else
1385 		return page_to_phys(pcpu_addr_to_page(addr)) +
1386 		       offset_in_page(addr);
1387 }
1388 
1389 /**
1390  * pcpu_alloc_alloc_info - allocate percpu allocation info
1391  * @nr_groups: the number of groups
1392  * @nr_units: the number of units
1393  *
1394  * Allocate ai which is large enough for @nr_groups groups containing
1395  * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1396  * cpu_map array which is long enough for @nr_units and filled with
1397  * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
1398  * pointer of other groups.
1399  *
1400  * RETURNS:
1401  * Pointer to the allocated pcpu_alloc_info on success, NULL on
1402  * failure.
1403  */
1404 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1405 						      int nr_units)
1406 {
1407 	struct pcpu_alloc_info *ai;
1408 	size_t base_size, ai_size;
1409 	void *ptr;
1410 	int unit;
1411 
1412 	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1413 			  __alignof__(ai->groups[0].cpu_map[0]));
1414 	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1415 
1416 	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
1417 	if (!ptr)
1418 		return NULL;
1419 	ai = ptr;
1420 	ptr += base_size;
1421 
1422 	ai->groups[0].cpu_map = ptr;
1423 
1424 	for (unit = 0; unit < nr_units; unit++)
1425 		ai->groups[0].cpu_map[unit] = NR_CPUS;
1426 
1427 	ai->nr_groups = nr_groups;
1428 	ai->__ai_size = PFN_ALIGN(ai_size);
1429 
1430 	return ai;
1431 }
1432 
1433 /**
1434  * pcpu_free_alloc_info - free percpu allocation info
1435  * @ai: pcpu_alloc_info to free
1436  *
1437  * Free @ai which was allocated by pcpu_alloc_alloc_info().
1438  */
1439 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1440 {
1441 	memblock_free_early(__pa(ai), ai->__ai_size);
1442 }
1443 
1444 /**
1445  * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1446  * @lvl: loglevel
1447  * @ai: allocation info to dump
1448  *
1449  * Print out information about @ai using loglevel @lvl.
1450  */
1451 static void pcpu_dump_alloc_info(const char *lvl,
1452 				 const struct pcpu_alloc_info *ai)
1453 {
1454 	int group_width = 1, cpu_width = 1, width;
1455 	char empty_str[] = "--------";
1456 	int alloc = 0, alloc_end = 0;
1457 	int group, v;
1458 	int upa, apl;	/* units per alloc, allocs per line */
1459 
1460 	v = ai->nr_groups;
1461 	while (v /= 10)
1462 		group_width++;
1463 
1464 	v = num_possible_cpus();
1465 	while (v /= 10)
1466 		cpu_width++;
1467 	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1468 
1469 	upa = ai->alloc_size / ai->unit_size;
1470 	width = upa * (cpu_width + 1) + group_width + 3;
1471 	apl = rounddown_pow_of_two(max(60 / width, 1));
1472 
1473 	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1474 	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1475 	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1476 
1477 	for (group = 0; group < ai->nr_groups; group++) {
1478 		const struct pcpu_group_info *gi = &ai->groups[group];
1479 		int unit = 0, unit_end = 0;
1480 
1481 		BUG_ON(gi->nr_units % upa);
1482 		for (alloc_end += gi->nr_units / upa;
1483 		     alloc < alloc_end; alloc++) {
1484 			if (!(alloc % apl)) {
1485 				pr_cont("\n");
1486 				printk("%spcpu-alloc: ", lvl);
1487 			}
1488 			pr_cont("[%0*d] ", group_width, group);
1489 
1490 			for (unit_end += upa; unit < unit_end; unit++)
1491 				if (gi->cpu_map[unit] != NR_CPUS)
1492 					pr_cont("%0*d ",
1493 						cpu_width, gi->cpu_map[unit]);
1494 				else
1495 					pr_cont("%s ", empty_str);
1496 		}
1497 	}
1498 	pr_cont("\n");
1499 }
1500 
1501 /**
1502  * pcpu_setup_first_chunk - initialize the first percpu chunk
1503  * @ai: pcpu_alloc_info describing how the percpu area is shaped
1504  * @base_addr: mapped address
1505  *
1506  * Initialize the first percpu chunk which contains the kernel static
1507  * percpu area.  This function is to be called from the arch percpu area
1508  * setup path.
1509  *
1510  * @ai contains all information necessary to initialize the first
1511  * chunk and prime the dynamic percpu allocator.
1512  *
1513  * @ai->static_size is the size of static percpu area.
1514  *
1515  * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1516  * reserve after the static area in the first chunk.  This reserves
1517  * part of the first chunk so that it's available only through reserved
1518  * percpu allocation.  This is primarily used to serve module percpu
1519  * static areas on architectures where the addressing model has
1520  * limited offset range for symbol relocations to guarantee module
1521  * percpu symbols fall inside the relocatable range.
1522  *
1523  * @ai->dyn_size determines the number of bytes available for dynamic
1524  * allocation in the first chunk.  The area between @ai->static_size +
1525  * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1526  *
1527  * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1528  * and equal to or larger than @ai->static_size + @ai->reserved_size +
1529  * @ai->dyn_size.
1530  *
1531  * @ai->atom_size is the allocation atom size and used as alignment
1532  * for vm areas.
1533  *
1534  * @ai->alloc_size is the allocation size and always multiple of
1535  * @ai->atom_size.  This is larger than @ai->atom_size if
1536  * @ai->unit_size is larger than @ai->atom_size.
1537  *
1538  * @ai->nr_groups and @ai->groups describe virtual memory layout of
1539  * percpu areas.  Units which should be colocated are put into the
1540  * same group.  Dynamic VM areas will be allocated according to these
1541  * groupings.  If @ai->nr_groups is zero, a single group containing
1542  * all units is assumed.
1543  *
1544  * The caller should have mapped the first chunk at @base_addr and
1545  * copied static data to each unit.
1546  *
1547  * If the first chunk ends up with both reserved and dynamic areas, it
1548  * is served by two chunks - one to serve the core static and reserved
1549  * areas and the other for the dynamic area.  They share the same vm
1550  * and page map but use different area allocation maps to stay away
1551  * from each other.  The latter chunk is circulated in the chunk slots
1552  * and is available for dynamic allocation like any other chunk.
1553  *
1554  * RETURNS:
1555  * 0 on success, -errno on failure.
1556  */
1557 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1558 				  void *base_addr)
1559 {
1560 	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1561 	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1562 	size_t dyn_size = ai->dyn_size;
1563 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1564 	struct pcpu_chunk *schunk, *dchunk = NULL;
1565 	unsigned long *group_offsets;
1566 	size_t *group_sizes;
1567 	unsigned long *unit_off;
1568 	unsigned int cpu;
1569 	int *unit_map;
1570 	int group, unit, i;
1571 
1572 #define PCPU_SETUP_BUG_ON(cond)	do {					\
1573 	if (unlikely(cond)) {						\
1574 		pr_emerg("failed to initialize, %s\n", #cond);		\
1575 		pr_emerg("cpu_possible_mask=%*pb\n",			\
1576 			 cpumask_pr_args(cpu_possible_mask));		\
1577 		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1578 		BUG();							\
1579 	}								\
1580 } while (0)
1581 
1582 	/* sanity checks */
1583 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1584 #ifdef CONFIG_SMP
1585 	PCPU_SETUP_BUG_ON(!ai->static_size);
1586 	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
1587 #endif
1588 	PCPU_SETUP_BUG_ON(!base_addr);
1589 	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
1590 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1591 	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
1592 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1593 	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1594 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1595 
1596 	/* process group information and build config tables accordingly */
1597 	group_offsets = memblock_virt_alloc(ai->nr_groups *
1598 					     sizeof(group_offsets[0]), 0);
1599 	group_sizes = memblock_virt_alloc(ai->nr_groups *
1600 					   sizeof(group_sizes[0]), 0);
1601 	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
1602 	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
1603 
1604 	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1605 		unit_map[cpu] = UINT_MAX;
1606 
1607 	pcpu_low_unit_cpu = NR_CPUS;
1608 	pcpu_high_unit_cpu = NR_CPUS;
1609 
1610 	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1611 		const struct pcpu_group_info *gi = &ai->groups[group];
1612 
1613 		group_offsets[group] = gi->base_offset;
1614 		group_sizes[group] = gi->nr_units * ai->unit_size;
1615 
1616 		for (i = 0; i < gi->nr_units; i++) {
1617 			cpu = gi->cpu_map[i];
1618 			if (cpu == NR_CPUS)
1619 				continue;
1620 
1621 			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
1622 			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1623 			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1624 
1625 			unit_map[cpu] = unit + i;
1626 			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1627 
1628 			/* determine low/high unit_cpu */
1629 			if (pcpu_low_unit_cpu == NR_CPUS ||
1630 			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1631 				pcpu_low_unit_cpu = cpu;
1632 			if (pcpu_high_unit_cpu == NR_CPUS ||
1633 			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1634 				pcpu_high_unit_cpu = cpu;
1635 		}
1636 	}
1637 	pcpu_nr_units = unit;
1638 
1639 	for_each_possible_cpu(cpu)
1640 		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1641 
1642 	/* we're done parsing the input, undefine BUG macro and dump config */
1643 #undef PCPU_SETUP_BUG_ON
1644 	pcpu_dump_alloc_info(KERN_DEBUG, ai);
1645 
1646 	pcpu_nr_groups = ai->nr_groups;
1647 	pcpu_group_offsets = group_offsets;
1648 	pcpu_group_sizes = group_sizes;
1649 	pcpu_unit_map = unit_map;
1650 	pcpu_unit_offsets = unit_off;
1651 
1652 	/* determine basic parameters */
1653 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1654 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1655 	pcpu_atom_size = ai->atom_size;
1656 	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1657 		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1658 
1659 	/*
1660 	 * Allocate chunk slots.  The additional last slot is for
1661 	 * empty chunks.
1662 	 */
1663 	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1664 	pcpu_slot = memblock_virt_alloc(
1665 			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
1666 	for (i = 0; i < pcpu_nr_slots; i++)
1667 		INIT_LIST_HEAD(&pcpu_slot[i]);
1668 
1669 	/*
1670 	 * Initialize static chunk.  If reserved_size is zero, the
1671 	 * static chunk covers static area + dynamic allocation area
1672 	 * in the first chunk.  If reserved_size is not zero, it
1673 	 * covers static area + reserved area (mostly used for module
1674 	 * static percpu allocation).
1675 	 */
1676 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1677 	INIT_LIST_HEAD(&schunk->list);
1678 	INIT_LIST_HEAD(&schunk->map_extend_list);
1679 	schunk->base_addr = base_addr;
1680 	schunk->map = smap;
1681 	schunk->map_alloc = ARRAY_SIZE(smap);
1682 	schunk->immutable = true;
1683 	bitmap_fill(schunk->populated, pcpu_unit_pages);
1684 	schunk->nr_populated = pcpu_unit_pages;
1685 
1686 	if (ai->reserved_size) {
1687 		schunk->free_size = ai->reserved_size;
1688 		pcpu_reserved_chunk = schunk;
1689 		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1690 	} else {
1691 		schunk->free_size = dyn_size;
1692 		dyn_size = 0;			/* dynamic area covered */
1693 	}
1694 	schunk->contig_hint = schunk->free_size;
1695 
1696 	schunk->map[0] = 1;
1697 	schunk->map[1] = ai->static_size;
1698 	schunk->map_used = 1;
1699 	if (schunk->free_size)
1700 		schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
1701 	schunk->map[schunk->map_used] |= 1;
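	/*
	 * Illustrative note: each map[] entry holds the start offset of an
	 * area, with the low bit set for an allocated area; the final entry
	 * is the end-of-chunk marker.  E.g. with a hypothetical 64 KiB
	 * static area and a non-zero free_size, the map built above is
	 * { 0 | 1, 65536, (65536 + free_size) | 1 }: static area allocated,
	 * the following area free, and the last entry terminating the chunk.
	 */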
1702 
1703 	/* init dynamic chunk if necessary */
1704 	if (dyn_size) {
1705 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1706 		INIT_LIST_HEAD(&dchunk->list);
1707 		INIT_LIST_HEAD(&dchunk->map_extend_list);
1708 		dchunk->base_addr = base_addr;
1709 		dchunk->map = dmap;
1710 		dchunk->map_alloc = ARRAY_SIZE(dmap);
1711 		dchunk->immutable = true;
1712 		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1713 		dchunk->nr_populated = pcpu_unit_pages;
1714 
1715 		dchunk->contig_hint = dchunk->free_size = dyn_size;
1716 		dchunk->map[0] = 1;
1717 		dchunk->map[1] = pcpu_reserved_chunk_limit;
1718 		dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
1719 		dchunk->map_used = 2;
1720 	}
1721 
1722 	/* link the first chunk in */
1723 	pcpu_first_chunk = dchunk ?: schunk;
1724 	pcpu_nr_empty_pop_pages +=
1725 		pcpu_count_occupied_pages(pcpu_first_chunk, 1);
1726 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1727 
1728 	/* we're done */
1729 	pcpu_base_addr = base_addr;
1730 	return 0;
1731 }
1732 
1733 #ifdef CONFIG_SMP
1734 
1735 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1736 	[PCPU_FC_AUTO]	= "auto",
1737 	[PCPU_FC_EMBED]	= "embed",
1738 	[PCPU_FC_PAGE]	= "page",
1739 };
1740 
1741 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1742 
1743 static int __init percpu_alloc_setup(char *str)
1744 {
1745 	if (!str)
1746 		return -EINVAL;
1747 
1748 	if (0)
1749 		/* nada */;
1750 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1751 	else if (!strcmp(str, "embed"))
1752 		pcpu_chosen_fc = PCPU_FC_EMBED;
1753 #endif
1754 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1755 	else if (!strcmp(str, "page"))
1756 		pcpu_chosen_fc = PCPU_FC_PAGE;
1757 #endif
1758 	else
1759 		pr_warn("unknown allocator %s specified\n", str);
1760 
1761 	return 0;
1762 }
1763 early_param("percpu_alloc", percpu_alloc_setup);
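/*
 * Usage note: booting with "percpu_alloc=embed" or "percpu_alloc=page"
 * overrides the default PCPU_FC_AUTO choice above, provided the
 * corresponding first chunk helper is built in.
 */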
1764 
1765 /*
1766  * pcpu_embed_first_chunk() is used by the generic percpu setup.
1767  * Build it if needed by the arch config or if the generic setup is
1768  * going to be used.
1769  */
1770 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1771 	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1772 #define BUILD_EMBED_FIRST_CHUNK
1773 #endif
1774 
1775 /* build pcpu_page_first_chunk() iff needed by the arch config */
1776 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1777 #define BUILD_PAGE_FIRST_CHUNK
1778 #endif
1779 
1780 /* pcpu_build_alloc_info() is used by both embed and page first chunk */
1781 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1782 /**
1783  * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1784  * @reserved_size: the size of reserved percpu area in bytes
1785  * @dyn_size: minimum free size for dynamic allocation in bytes
1786  * @atom_size: allocation atom size
1787  * @cpu_distance_fn: callback to determine distance between cpus, optional
1788  *
1789  * This function determines grouping of units, their mappings to cpus
1790  * and other parameters considering needed percpu size, allocation
1791  * atom size and distances between CPUs.
1792  *
1793  * Groups are always multiples of atom size, and CPUs which are within
1794  * LOCAL_DISTANCE of each other in both directions are grouped together
1795  * and share space for units in the same group.  The returned configuration
1796  * is guaranteed to put CPUs on different nodes into different groups and
1797  * to use >=75% of the allocated virtual address space.
1798  *
1799  * RETURNS:
1800  * On success, pointer to the new pcpu_alloc_info is returned.  On
1801  * failure, ERR_PTR value is returned.
1802  */
1803 static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1804 				size_t reserved_size, size_t dyn_size,
1805 				size_t atom_size,
1806 				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1807 {
1808 	static int group_map[NR_CPUS] __initdata;
1809 	static int group_cnt[NR_CPUS] __initdata;
1810 	const size_t static_size = __per_cpu_end - __per_cpu_start;
1811 	int nr_groups = 1, nr_units = 0;
1812 	size_t size_sum, min_unit_size, alloc_size;
1813 	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1814 	int last_allocs, group, unit;
1815 	unsigned int cpu, tcpu;
1816 	struct pcpu_alloc_info *ai;
1817 	unsigned int *cpu_map;
1818 
1819 	/* this function may be called multiple times */
1820 	memset(group_map, 0, sizeof(group_map));
1821 	memset(group_cnt, 0, sizeof(group_cnt));
1822 
1823 	/* calculate size_sum and ensure dyn_size is enough for early alloc */
1824 	size_sum = PFN_ALIGN(static_size + reserved_size +
1825 			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1826 	dyn_size = size_sum - static_size - reserved_size;
1827 
1828 	/*
1829 	 * Determine min_unit_size, alloc_size and max_upa such that
1830 	 * alloc_size is multiple of atom_size and is the smallest
1831 	 * which can accommodate 4k aligned segments which are equal to
1832 	 * or larger than min_unit_size.
1833 	 */
1834 	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1835 
1836 	alloc_size = roundup(min_unit_size, atom_size);
1837 	upa = alloc_size / min_unit_size;
1838 	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
1839 		upa--;
1840 	max_upa = upa;
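	/*
	 * Worked example with hypothetical numbers: for atom_size = 2 MiB and
	 * min_unit_size = 176 KiB, alloc_size = 2 MiB and the loop above
	 * settles on max_upa = 8, i.e. eight page-aligned 256 KiB units per
	 * 2 MiB allocation.
	 */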
1841 
1842 	/* group cpus according to their proximity */
1843 	for_each_possible_cpu(cpu) {
1844 		group = 0;
1845 	next_group:
1846 		for_each_possible_cpu(tcpu) {
1847 			if (cpu == tcpu)
1848 				break;
1849 			if (group_map[tcpu] == group && cpu_distance_fn &&
1850 			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1851 			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1852 				group++;
1853 				nr_groups = max(nr_groups, group + 1);
1854 				goto next_group;
1855 			}
1856 		}
1857 		group_map[cpu] = group;
1858 		group_cnt[group]++;
1859 	}
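	/*
	 * Hypothetical example: on a two-node machine with CPUs 0-3 on node 0
	 * and CPUs 4-7 on node 1, with cpu_distance_fn reporting more than
	 * LOCAL_DISTANCE across nodes, this yields
	 * group_map = { 0, 0, 0, 0, 1, 1, 1, 1 } and group_cnt = { 4, 4 }.
	 */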
1860 
1861 	/*
1862 	 * Expand unit size until address space usage goes over 75%
1863 	 * and then as much as possible without using more address
1864 	 * space.
1865 	 */
1866 	last_allocs = INT_MAX;
1867 	for (upa = max_upa; upa; upa--) {
1868 		int allocs = 0, wasted = 0;
1869 
1870 		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
1871 			continue;
1872 
1873 		for (group = 0; group < nr_groups; group++) {
1874 			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1875 			allocs += this_allocs;
1876 			wasted += this_allocs * upa - group_cnt[group];
1877 		}
1878 
1879 		/*
1880 		 * Don't accept if wastage is over 1/3.  The
1881 		 * greater-than comparison ensures upa==1 always
1882 		 * passes the following check.
1883 		 */
1884 		if (wasted > num_possible_cpus() / 3)
1885 			continue;
1886 
1887 		/* and then don't consume more memory */
1888 		if (allocs > last_allocs)
1889 			break;
1890 		last_allocs = allocs;
1891 		best_upa = upa;
1892 	}
1893 	upa = best_upa;
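	/*
	 * Continuing the hypothetical example above (two groups of four CPUs,
	 * alloc_size = 2 MiB, max_upa = 8): upa = 8 wastes four units per
	 * group and is rejected, upa = 4 packs each group into a single
	 * allocation with no waste, and upa = 2 would double the allocation
	 * count, so best_upa ends up as 4 (512 KiB units).
	 */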
1894 
1895 	/* allocate and fill alloc_info */
1896 	for (group = 0; group < nr_groups; group++)
1897 		nr_units += roundup(group_cnt[group], upa);
1898 
1899 	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1900 	if (!ai)
1901 		return ERR_PTR(-ENOMEM);
1902 	cpu_map = ai->groups[0].cpu_map;
1903 
1904 	for (group = 0; group < nr_groups; group++) {
1905 		ai->groups[group].cpu_map = cpu_map;
1906 		cpu_map += roundup(group_cnt[group], upa);
1907 	}
1908 
1909 	ai->static_size = static_size;
1910 	ai->reserved_size = reserved_size;
1911 	ai->dyn_size = dyn_size;
1912 	ai->unit_size = alloc_size / upa;
1913 	ai->atom_size = atom_size;
1914 	ai->alloc_size = alloc_size;
1915 
1916 	for (group = 0, unit = 0; group_cnt[group]; group++) {
1917 		struct pcpu_group_info *gi = &ai->groups[group];
1918 
1919 		/*
1920 		 * Initialize base_offset as if all groups are located
1921 		 * back-to-back.  The caller should update this to
1922 		 * reflect actual allocation.
1923 		 */
1924 		gi->base_offset = unit * ai->unit_size;
1925 
1926 		for_each_possible_cpu(cpu)
1927 			if (group_map[cpu] == group)
1928 				gi->cpu_map[gi->nr_units++] = cpu;
1929 		gi->nr_units = roundup(gi->nr_units, upa);
1930 		unit += gi->nr_units;
1931 	}
1932 	BUG_ON(unit != nr_units);
1933 
1934 	return ai;
1935 }
1936 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1937 
1938 #if defined(BUILD_EMBED_FIRST_CHUNK)
1939 /**
1940  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1941  * @reserved_size: the size of reserved percpu area in bytes
1942  * @dyn_size: minimum free size for dynamic allocation in bytes
1943  * @atom_size: allocation atom size
1944  * @cpu_distance_fn: callback to determine distance between cpus, optional
1945  * @alloc_fn: function to allocate percpu page
1946  * @free_fn: function to free percpu page
1947  *
1948  * This is a helper to ease setting up the embedded first percpu chunk
1949  * and can be called where pcpu_setup_first_chunk() is expected.
1950  *
1951  * If this function is used to set up the first chunk, it is allocated
1952  * by calling @alloc_fn and used as-is without being mapped into the
1953  * vmalloc area.  Allocations are always whole multiples of @atom_size
1954  * aligned to @atom_size.
1955  *
1956  * This enables the first chunk to piggyback on the linear physical
1957  * mapping, which often uses a larger page size.  Please note that this
1958  * can result in a very sparse cpu->unit mapping on NUMA machines, thus
1959  * requiring a large vmalloc address space.  Don't use this allocator if
1960  * vmalloc space is not orders of magnitude larger than the distances
1961  * between node memory addresses (i.e. 32-bit NUMA machines).
1962  *
1963  * @dyn_size specifies the minimum dynamic area size.
1964  *
1965  * If the needed size is smaller than the minimum or the specified unit
1966  * size, the leftover is freed back via @free_fn.
1967  *
1968  * RETURNS:
1969  * 0 on success, -errno on failure.
1970  */
1971 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1972 				  size_t atom_size,
1973 				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1974 				  pcpu_fc_alloc_fn_t alloc_fn,
1975 				  pcpu_fc_free_fn_t free_fn)
1976 {
1977 	void *base = (void *)ULONG_MAX;
1978 	void **areas = NULL;
1979 	struct pcpu_alloc_info *ai;
1980 	size_t size_sum, areas_size;
1981 	unsigned long max_distance;
1982 	int group, i, highest_group, rc;
1983 
1984 	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1985 				   cpu_distance_fn);
1986 	if (IS_ERR(ai))
1987 		return PTR_ERR(ai);
1988 
1989 	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1990 	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1991 
1992 	areas = memblock_virt_alloc_nopanic(areas_size, 0);
1993 	if (!areas) {
1994 		rc = -ENOMEM;
1995 		goto out_free;
1996 	}
1997 
1998 	/* allocate, copy and determine base address & max_distance */
1999 	highest_group = 0;
2000 	for (group = 0; group < ai->nr_groups; group++) {
2001 		struct pcpu_group_info *gi = &ai->groups[group];
2002 		unsigned int cpu = NR_CPUS;
2003 		void *ptr;
2004 
2005 		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2006 			cpu = gi->cpu_map[i];
2007 		BUG_ON(cpu == NR_CPUS);
2008 
2009 		/* allocate space for the whole group */
2010 		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2011 		if (!ptr) {
2012 			rc = -ENOMEM;
2013 			goto out_free_areas;
2014 		}
2015 		/* kmemleak tracks the percpu allocations separately */
2016 		kmemleak_free(ptr);
2017 		areas[group] = ptr;
2018 
2019 		base = min(ptr, base);
2020 		if (ptr > areas[highest_group])
2021 			highest_group = group;
2022 	}
2023 	max_distance = areas[highest_group] - base;
2024 	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
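	/*
	 * Hypothetical example: if the highest group was allocated 1 GiB
	 * above the lowest and holds four 512 KiB units, max_distance is
	 * 1 GiB + 2 MiB, and the warning below triggers if that exceeds
	 * 75% of the vmalloc space.
	 */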
2025 
2026 	/* warn if maximum distance is further than 75% of vmalloc space */
2027 	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2028 		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
2029 				max_distance, VMALLOC_TOTAL);
2030 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2031 		/* and fail if we have a fallback */
2032 		rc = -EINVAL;
2033 		goto out_free_areas;
2034 #endif
2035 	}
2036 
2037 	/*
2038 	 * Copy data and free unused parts.  This should happen after all
2039 	 * allocations are complete; otherwise, we may end up with
2040 	 * overlapping groups.
2041 	 */
2042 	for (group = 0; group < ai->nr_groups; group++) {
2043 		struct pcpu_group_info *gi = &ai->groups[group];
2044 		void *ptr = areas[group];
2045 
2046 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2047 			if (gi->cpu_map[i] == NR_CPUS) {
2048 				/* unused unit, free whole */
2049 				free_fn(ptr, ai->unit_size);
2050 				continue;
2051 			}
2052 			/* copy static data and free the unused part */
2053 			memcpy(ptr, __per_cpu_load, ai->static_size);
2054 			free_fn(ptr + size_sum, ai->unit_size - size_sum);
2055 		}
2056 	}
2057 
2058 	/* base address is now known, determine group base offsets */
2059 	for (group = 0; group < ai->nr_groups; group++) {
2060 		ai->groups[group].base_offset = areas[group] - base;
2061 	}
2062 
2063 	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2064 		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2065 		ai->dyn_size, ai->unit_size);
2066 
2067 	rc = pcpu_setup_first_chunk(ai, base);
2068 	goto out_free;
2069 
2070 out_free_areas:
2071 	for (group = 0; group < ai->nr_groups; group++)
2072 		if (areas[group])
2073 			free_fn(areas[group],
2074 				ai->groups[group].nr_units * ai->unit_size);
2075 out_free:
2076 	pcpu_free_alloc_info(ai);
2077 	if (areas)
2078 		memblock_free_early(__pa(areas), areas_size);
2079 	return rc;
2080 }
2081 #endif /* BUILD_EMBED_FIRST_CHUNK */
2082 
2083 #ifdef BUILD_PAGE_FIRST_CHUNK
2084 /**
2085  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2086  * @reserved_size: the size of reserved percpu area in bytes
2087  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2088  * @free_fn: function to free percpu page, always called with PAGE_SIZE
2089  * @populate_pte_fn: function to populate pte
2090  *
2091  * This is a helper to ease setting up page-remapped first percpu
2092  * chunk and can be called where pcpu_setup_first_chunk() is expected.
2093  *
2094  * This is the basic allocator.  The static percpu area is allocated
2095  * page-by-page into the vmalloc area.
2096  *
2097  * RETURNS:
2098  * 0 on success, -errno on failure.
2099  */
2100 int __init pcpu_page_first_chunk(size_t reserved_size,
2101 				 pcpu_fc_alloc_fn_t alloc_fn,
2102 				 pcpu_fc_free_fn_t free_fn,
2103 				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2104 {
2105 	static struct vm_struct vm;
2106 	struct pcpu_alloc_info *ai;
2107 	char psize_str[16];
2108 	int unit_pages;
2109 	size_t pages_size;
2110 	struct page **pages;
2111 	int unit, i, j, rc;
2112 	int upa;
2113 	int nr_g0_units;
2114 
2115 	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2116 
2117 	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2118 	if (IS_ERR(ai))
2119 		return PTR_ERR(ai);
2120 	BUG_ON(ai->nr_groups != 1);
2121 	upa = ai->alloc_size / ai->unit_size;
2122 	nr_g0_units = roundup(num_possible_cpus(), upa);
2123 	if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
2124 		pcpu_free_alloc_info(ai);
2125 		return -EINVAL;
2126 	}
2127 
2128 	unit_pages = ai->unit_size >> PAGE_SHIFT;
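	/*
	 * E.g. with 4 KiB pages and a 32 KiB unit, unit_pages is 8, so eight
	 * pages are allocated for every possible CPU below.
	 */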
2129 
2130 	/* unaligned allocations can't be freed, round up to page size */
2131 	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2132 			       sizeof(pages[0]));
2133 	pages = memblock_virt_alloc(pages_size, 0);
2134 
2135 	/* allocate pages */
2136 	j = 0;
2137 	for (unit = 0; unit < num_possible_cpus(); unit++) {
2138 		unsigned int cpu = ai->groups[0].cpu_map[unit];
2139 		for (i = 0; i < unit_pages; i++) {
2140 			void *ptr;
2141 
2142 			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2143 			if (!ptr) {
2144 				pr_warn("failed to allocate %s page for cpu%u\n",
2145 						psize_str, cpu);
2146 				goto enomem;
2147 			}
2148 			/* kmemleak tracks the percpu allocations separately */
2149 			kmemleak_free(ptr);
2150 			pages[j++] = virt_to_page(ptr);
2151 		}
2152 	}
2153 
2154 	/* allocate vm area, map the pages and copy static data */
2155 	vm.flags = VM_ALLOC;
2156 	vm.size = num_possible_cpus() * ai->unit_size;
2157 	vm_area_register_early(&vm, PAGE_SIZE);
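	/*
	 * E.g. with 8 possible CPUs and a 32 KiB unit, a 256 KiB chunk of
	 * vmalloc space is reserved here and mapped page by page below.
	 */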
2158 
2159 	for (unit = 0; unit < num_possible_cpus(); unit++) {
2160 		unsigned long unit_addr =
2161 			(unsigned long)vm.addr + unit * ai->unit_size;
2162 
2163 		for (i = 0; i < unit_pages; i++)
2164 			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2165 
2166 		/* pte already populated, the following shouldn't fail */
2167 		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2168 				      unit_pages);
2169 		if (rc < 0)
2170 			panic("failed to map percpu area, err=%d\n", rc);
2171 
2172 		/*
2173 		 * FIXME: Archs with virtual cache should flush local
2174 		 * cache for the linear mapping here - something
2175 		 * equivalent to flush_cache_vmap() on the local cpu.
2176 		 * flush_cache_vmap() can't be used as most supporting
2177 		 * data structures are not set up yet.
2178 		 */
2179 
2180 		/* copy static data */
2181 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
2182 	}
2183 
2184 	/* we're ready, commit */
2185 	pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
2186 		unit_pages, psize_str, vm.addr, ai->static_size,
2187 		ai->reserved_size, ai->dyn_size);
2188 
2189 	rc = pcpu_setup_first_chunk(ai, vm.addr);
2190 	goto out_free_ar;
2191 
2192 enomem:
2193 	while (--j >= 0)
2194 		free_fn(page_address(pages[j]), PAGE_SIZE);
2195 	rc = -ENOMEM;
2196 out_free_ar:
2197 	memblock_free_early(__pa(pages), pages_size);
2198 	pcpu_free_alloc_info(ai);
2199 	return rc;
2200 }
2201 #endif /* BUILD_PAGE_FIRST_CHUNK */
2202 
2203 #ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
2204 /*
2205  * Generic SMP percpu area setup.
2206  *
2207  * The embedding helper is used because its behavior closely resembles
2208  * the original non-dynamic generic percpu area setup.  This is
2209  * important because many archs have addressing restrictions and might
2210  * fail if the percpu area is located far away from the previous
2211  * location.  As an added bonus, in non-NUMA cases, embedding is
2212  * generally a good idea TLB-wise because the percpu area can piggyback
2213  * on the physical linear memory mapping which uses large page
2214  * mappings on applicable archs.
2215  */
2216 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2217 EXPORT_SYMBOL(__per_cpu_offset);
2218 
2219 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2220 				       size_t align)
2221 {
2222 	return  memblock_virt_alloc_from_nopanic(
2223 			size, align, __pa(MAX_DMA_ADDRESS));
2224 }
2225 
2226 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2227 {
2228 	memblock_free_early(__pa(ptr), size);
2229 }
2230 
2231 void __init setup_per_cpu_areas(void)
2232 {
2233 	unsigned long delta;
2234 	unsigned int cpu;
2235 	int rc;
2236 
2237 	/*
2238 	 * Always reserve area for module percpu variables.  That's
2239 	 * what the legacy allocator did.
2240 	 */
2241 	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2242 				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2243 				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2244 	if (rc < 0)
2245 		panic("Failed to initialize percpu areas.");
2246 
2247 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
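	/*
	 * delta re-bases the linker-assigned static percpu addresses onto the
	 * first chunk; each CPU's final offset is delta plus its unit offset,
	 * which is what the generic per_cpu accessors add to a static percpu
	 * address.
	 */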
2248 	for_each_possible_cpu(cpu)
2249 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2250 }
2251 #endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2252 
2253 #else	/* CONFIG_SMP */
2254 
2255 /*
2256  * UP percpu area setup.
2257  *
2258  * UP always uses the km-based percpu allocator with identity mapping.
2259  * Static percpu variables are indistinguishable from the usual static
2260  * variables and don't require any special preparation.
2261  */
2262 void __init setup_per_cpu_areas(void)
2263 {
2264 	const size_t unit_size =
2265 		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2266 					 PERCPU_DYNAMIC_RESERVE));
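	/*
	 * For instance, if PERCPU_DYNAMIC_RESERVE were 28 KiB, unit_size would
	 * be PCPU_MIN_UNIT_SIZE (32 KiB) rounded up to a power of two, i.e.
	 * still 32 KiB.
	 */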
2267 	struct pcpu_alloc_info *ai;
2268 	void *fc;
2269 
2270 	ai = pcpu_alloc_alloc_info(1, 1);
2271 	fc = memblock_virt_alloc_from_nopanic(unit_size,
2272 					      PAGE_SIZE,
2273 					      __pa(MAX_DMA_ADDRESS));
2274 	if (!ai || !fc)
2275 		panic("Failed to allocate memory for percpu areas.");
2276 	/* kmemleak tracks the percpu allocations separately */
2277 	kmemleak_free(fc);
2278 
2279 	ai->dyn_size = unit_size;
2280 	ai->unit_size = unit_size;
2281 	ai->atom_size = unit_size;
2282 	ai->alloc_size = unit_size;
2283 	ai->groups[0].nr_units = 1;
2284 	ai->groups[0].cpu_map[0] = 0;
2285 
2286 	if (pcpu_setup_first_chunk(ai, fc) < 0)
2287 		panic("Failed to initialize percpu areas.");
2288 }
2289 
2290 #endif	/* CONFIG_SMP */
2291 
2292 /*
2293  * The first and reserved chunks are initialized with temporary allocation
2294  * maps in initdata so that they can be used before slab is online.
2295  * This function is called after slab is brought up and replaces those
2296  * with properly allocated maps.
2297  */
2298 void __init percpu_init_late(void)
2299 {
2300 	struct pcpu_chunk *target_chunks[] =
2301 		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
2302 	struct pcpu_chunk *chunk;
2303 	unsigned long flags;
2304 	int i;
2305 
2306 	for (i = 0; (chunk = target_chunks[i]); i++) {
2307 		int *map;
2308 		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
2309 
2310 		BUILD_BUG_ON(size > PAGE_SIZE);
2311 
2312 		map = pcpu_mem_zalloc(size);
2313 		BUG_ON(!map);
2314 
2315 		spin_lock_irqsave(&pcpu_lock, flags);
2316 		memcpy(map, chunk->map, size);
2317 		chunk->map = map;
2318 		spin_unlock_irqrestore(&pcpu_lock, flags);
2319 	}
2320 }
2321 
2322 /*
2323  * The percpu allocator is initialized early during boot when neither slab
2324  * nor workqueue is available.  Plug async management until everything is up
2325  * and running.
2326  */
2327 static int __init percpu_enable_async(void)
2328 {
2329 	pcpu_async_enabled = true;
2330 	return 0;
2331 }
2332 subsys_initcall(percpu_enable_async);
2333