xref: /openbmc/linux/mm/vmalloc.c (revision 01cc2ec6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/vmalloc.c
4  *
5  *  Copyright (C) 1993  Linus Torvalds
6  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
7  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
8  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
9  *  Numa awareness, Christoph Lameter, SGI, June 2005
10  *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
11  */
12 
13 #include <linux/vmalloc.h>
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/highmem.h>
17 #include <linux/sched/signal.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/interrupt.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/set_memory.h>
24 #include <linux/debugobjects.h>
25 #include <linux/kallsyms.h>
26 #include <linux/list.h>
27 #include <linux/notifier.h>
28 #include <linux/rbtree.h>
29 #include <linux/xarray.h>
30 #include <linux/rcupdate.h>
31 #include <linux/pfn.h>
32 #include <linux/kmemleak.h>
33 #include <linux/atomic.h>
34 #include <linux/compiler.h>
35 #include <linux/llist.h>
36 #include <linux/bitops.h>
37 #include <linux/rbtree_augmented.h>
38 #include <linux/overflow.h>
39 
40 #include <linux/uaccess.h>
41 #include <asm/tlbflush.h>
42 #include <asm/shmparam.h>
43 
44 #include "internal.h"
45 #include "pgalloc-track.h"
46 
47 bool is_vmalloc_addr(const void *x)
48 {
49 	unsigned long addr = (unsigned long)x;
50 
51 	return addr >= VMALLOC_START && addr < VMALLOC_END;
52 }
53 EXPORT_SYMBOL(is_vmalloc_addr);
54 
55 struct vfree_deferred {
56 	struct llist_head list;
57 	struct work_struct wq;
58 };
59 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
60 
61 static void __vunmap(const void *, int);
62 
63 static void free_work(struct work_struct *w)
64 {
65 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
66 	struct llist_node *t, *llnode;
67 
68 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
69 		__vunmap((void *)llnode, 1);
70 }
71 
72 /*** Page table manipulation functions ***/
73 
74 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
75 			     pgtbl_mod_mask *mask)
76 {
77 	pte_t *pte;
78 
79 	pte = pte_offset_kernel(pmd, addr);
80 	do {
81 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
82 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
83 	} while (pte++, addr += PAGE_SIZE, addr != end);
84 	*mask |= PGTBL_PTE_MODIFIED;
85 }
86 
87 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
88 			     pgtbl_mod_mask *mask)
89 {
90 	pmd_t *pmd;
91 	unsigned long next;
92 	int cleared;
93 
94 	pmd = pmd_offset(pud, addr);
95 	do {
96 		next = pmd_addr_end(addr, end);
97 
98 		cleared = pmd_clear_huge(pmd);
99 		if (cleared || pmd_bad(*pmd))
100 			*mask |= PGTBL_PMD_MODIFIED;
101 
102 		if (cleared)
103 			continue;
104 		if (pmd_none_or_clear_bad(pmd))
105 			continue;
106 		vunmap_pte_range(pmd, addr, next, mask);
107 
108 		cond_resched();
109 	} while (pmd++, addr = next, addr != end);
110 }
111 
112 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
113 			     pgtbl_mod_mask *mask)
114 {
115 	pud_t *pud;
116 	unsigned long next;
117 	int cleared;
118 
119 	pud = pud_offset(p4d, addr);
120 	do {
121 		next = pud_addr_end(addr, end);
122 
123 		cleared = pud_clear_huge(pud);
124 		if (cleared || pud_bad(*pud))
125 			*mask |= PGTBL_PUD_MODIFIED;
126 
127 		if (cleared)
128 			continue;
129 		if (pud_none_or_clear_bad(pud))
130 			continue;
131 		vunmap_pmd_range(pud, addr, next, mask);
132 	} while (pud++, addr = next, addr != end);
133 }
134 
135 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
136 			     pgtbl_mod_mask *mask)
137 {
138 	p4d_t *p4d;
139 	unsigned long next;
140 	int cleared;
141 
142 	p4d = p4d_offset(pgd, addr);
143 	do {
144 		next = p4d_addr_end(addr, end);
145 
146 		cleared = p4d_clear_huge(p4d);
147 		if (cleared || p4d_bad(*p4d))
148 			*mask |= PGTBL_P4D_MODIFIED;
149 
150 		if (cleared)
151 			continue;
152 		if (p4d_none_or_clear_bad(p4d))
153 			continue;
154 		vunmap_pud_range(p4d, addr, next, mask);
155 	} while (p4d++, addr = next, addr != end);
156 }
157 
158 /**
159  * unmap_kernel_range_noflush - unmap kernel VM area
160  * @start: start of the VM area to unmap
161  * @size: size of the VM area to unmap
162  *
163  * Unmap PFN_UP(@size) pages starting at @start.  The VM area specified by
164  * @start and @size should have been allocated using get_vm_area() and its friends.
165  *
166  * NOTE:
167  * This function does NOT do any cache flushing.  The caller is responsible
168  * for calling flush_cache_vunmap() on to-be-unmapped areas before calling this
169  * function and flush_tlb_kernel_range() after.
170  */
171 void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
172 {
173 	unsigned long end = start + size;
174 	unsigned long next;
175 	pgd_t *pgd;
176 	unsigned long addr = start;
177 	pgtbl_mod_mask mask = 0;
178 
179 	BUG_ON(addr >= end);
180 	pgd = pgd_offset_k(addr);
181 	do {
182 		next = pgd_addr_end(addr, end);
183 		if (pgd_bad(*pgd))
184 			mask |= PGTBL_PGD_MODIFIED;
185 		if (pgd_none_or_clear_bad(pgd))
186 			continue;
187 		vunmap_p4d_range(pgd, addr, next, &mask);
188 	} while (pgd++, addr = next, addr != end);
189 
190 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
191 		arch_sync_kernel_mappings(start, end);
192 }
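
/*
 * A minimal usage sketch (illustrative only; the name below is made up,
 * but this mirrors what free_unmap_vmap_area() further down actually does):
 * the caller owns both the cache and the TLB maintenance around this call.
 */
#if 0	/* example only, not compiled */
static void example_unmap(unsigned long start, unsigned long size)
{
	flush_cache_vunmap(start, start + size);	/* before unmapping */
	unmap_kernel_range_noflush(start, size);
	flush_tlb_kernel_range(start, start + size);	/* after unmapping */
}
#endif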
193 
194 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
195 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
196 		pgtbl_mod_mask *mask)
197 {
198 	pte_t *pte;
199 
200 	/*
201 	 * nr is a running index into the array which helps higher level
202 	 * callers keep track of where we're up to.
203 	 */
204 
205 	pte = pte_alloc_kernel_track(pmd, addr, mask);
206 	if (!pte)
207 		return -ENOMEM;
208 	do {
209 		struct page *page = pages[*nr];
210 
211 		if (WARN_ON(!pte_none(*pte)))
212 			return -EBUSY;
213 		if (WARN_ON(!page))
214 			return -ENOMEM;
215 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
216 		(*nr)++;
217 	} while (pte++, addr += PAGE_SIZE, addr != end);
218 	*mask |= PGTBL_PTE_MODIFIED;
219 	return 0;
220 }
221 
222 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
223 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
224 		pgtbl_mod_mask *mask)
225 {
226 	pmd_t *pmd;
227 	unsigned long next;
228 
229 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
230 	if (!pmd)
231 		return -ENOMEM;
232 	do {
233 		next = pmd_addr_end(addr, end);
234 		if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
235 			return -ENOMEM;
236 	} while (pmd++, addr = next, addr != end);
237 	return 0;
238 }
239 
240 static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
241 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
242 		pgtbl_mod_mask *mask)
243 {
244 	pud_t *pud;
245 	unsigned long next;
246 
247 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
248 	if (!pud)
249 		return -ENOMEM;
250 	do {
251 		next = pud_addr_end(addr, end);
252 		if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
253 			return -ENOMEM;
254 	} while (pud++, addr = next, addr != end);
255 	return 0;
256 }
257 
258 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
259 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
260 		pgtbl_mod_mask *mask)
261 {
262 	p4d_t *p4d;
263 	unsigned long next;
264 
265 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
266 	if (!p4d)
267 		return -ENOMEM;
268 	do {
269 		next = p4d_addr_end(addr, end);
270 		if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
271 			return -ENOMEM;
272 	} while (p4d++, addr = next, addr != end);
273 	return 0;
274 }
275 
276 /**
277  * map_kernel_range_noflush - map kernel VM area with the specified pages
278  * @addr: start of the VM area to map
279  * @size: size of the VM area to map
280  * @prot: page protection flags to use
281  * @pages: pages to map
282  *
283  * Map PFN_UP(@size) pages at @addr.  The VM area specified by @addr and @size
284  * should have been allocated using get_vm_area() and its friends.
285  *
286  * NOTE:
287  * This function does NOT do any cache flushing.  The caller is responsible for
288  * calling flush_cache_vmap() on to-be-mapped areas before calling this
289  * function.
290  *
291  * RETURNS:
292  * 0 on success, -errno on failure.
293  */
294 int map_kernel_range_noflush(unsigned long addr, unsigned long size,
295 			     pgprot_t prot, struct page **pages)
296 {
297 	unsigned long start = addr;
298 	unsigned long end = addr + size;
299 	unsigned long next;
300 	pgd_t *pgd;
301 	int err = 0;
302 	int nr = 0;
303 	pgtbl_mod_mask mask = 0;
304 
305 	BUG_ON(addr >= end);
306 	pgd = pgd_offset_k(addr);
307 	do {
308 		next = pgd_addr_end(addr, end);
309 		if (pgd_bad(*pgd))
310 			mask |= PGTBL_PGD_MODIFIED;
311 		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
312 		if (err)
313 			return err;
314 	} while (pgd++, addr = next, addr != end);
315 
316 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
317 		arch_sync_kernel_mappings(start, end);
318 
319 	return 0;
320 }
321 
322 int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
323 		struct page **pages)
324 {
325 	int ret;
326 
327 	ret = map_kernel_range_noflush(start, size, prot, pages);
328 	flush_cache_vmap(start, start + size);
329 	return ret;
330 }
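
/*
 * Illustrative sketch of mapping pages into a VM area obtained from
 * get_vm_area(), as the kernel-doc above assumes; example_map_pages() is
 * a made-up name and the error handling is kept minimal.
 */
#if 0	/* example only, not compiled */
static void *example_map_pages(struct page **pages, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	struct vm_struct *area;

	area = get_vm_area(size, VM_MAP);
	if (!area)
		return NULL;

	if (map_kernel_range((unsigned long)area->addr, size,
			     PAGE_KERNEL, pages) < 0) {
		free_vm_area(area);
		return NULL;
	}

	return area->addr;
}
#endif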
331 
332 int is_vmalloc_or_module_addr(const void *x)
333 {
334 	/*
335 	 * ARM, x86-64 and sparc64 put modules in a special place,
336 	 * and fall back on vmalloc() if that fails. Others
337 	 * just put modules in the vmalloc space.
338 	 */
339 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
340 	unsigned long addr = (unsigned long)x;
341 	if (addr >= MODULES_VADDR && addr < MODULES_END)
342 		return 1;
343 #endif
344 	return is_vmalloc_addr(x);
345 }
346 
347 /*
348  * Walk a vmap address to the struct page it maps.
349  */
350 struct page *vmalloc_to_page(const void *vmalloc_addr)
351 {
352 	unsigned long addr = (unsigned long) vmalloc_addr;
353 	struct page *page = NULL;
354 	pgd_t *pgd = pgd_offset_k(addr);
355 	p4d_t *p4d;
356 	pud_t *pud;
357 	pmd_t *pmd;
358 	pte_t *ptep, pte;
359 
360 	/*
361 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
362 	 * architectures that do not vmalloc module space
363 	 */
364 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
365 
366 	if (pgd_none(*pgd))
367 		return NULL;
368 	p4d = p4d_offset(pgd, addr);
369 	if (p4d_none(*p4d))
370 		return NULL;
371 	pud = pud_offset(p4d, addr);
372 
373 	/*
374 	 * Don't dereference bad PUD or PMD (below) entries. This will also
375 	 * identify huge mappings, which we may encounter on architectures
376 	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
377 	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
378 	 * not [unambiguously] associated with a struct page, so there is
379 	 * no correct value to return for them.
380 	 */
381 	WARN_ON_ONCE(pud_bad(*pud));
382 	if (pud_none(*pud) || pud_bad(*pud))
383 		return NULL;
384 	pmd = pmd_offset(pud, addr);
385 	WARN_ON_ONCE(pmd_bad(*pmd));
386 	if (pmd_none(*pmd) || pmd_bad(*pmd))
387 		return NULL;
388 
389 	ptep = pte_offset_map(pmd, addr);
390 	pte = *ptep;
391 	if (pte_present(pte))
392 		page = pte_page(pte);
393 	pte_unmap(ptep);
394 	return page;
395 }
396 EXPORT_SYMBOL(vmalloc_to_page);
397 
398 /*
399  * Map a vmalloc()-space virtual address to the physical page frame number.
400  */
401 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
402 {
403 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
404 }
405 EXPORT_SYMBOL(vmalloc_to_pfn);
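
/*
 * A small sketch of the two helpers above (the function name is made up):
 * walk a vmalloc'ed buffer back to its first backing page and page frame.
 */
#if 0	/* example only, not compiled */
static void example_vmalloc_to_pfn(void)
{
	void *buf = vmalloc(PAGE_SIZE);

	if (!buf)
		return;

	pr_info("page=%px pfn=%lu\n",
		vmalloc_to_page(buf), vmalloc_to_pfn(buf));
	vfree(buf);
}
#endif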
406 
407 
408 /*** Global kva allocator ***/
409 
410 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
411 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
412 
413 
414 static DEFINE_SPINLOCK(vmap_area_lock);
415 static DEFINE_SPINLOCK(free_vmap_area_lock);
416 /* Export for kexec only */
417 LIST_HEAD(vmap_area_list);
418 static LLIST_HEAD(vmap_purge_list);
419 static struct rb_root vmap_area_root = RB_ROOT;
420 static bool vmap_initialized __read_mostly;
421 
422 /*
423  * This kmem_cache is used for vmap_area objects. Instead of
424  * allocating from slab each time, we reuse an object from this
425  * cache to make things faster, especially for the "no edge"
426  * split of a free block.
427  */
428 static struct kmem_cache *vmap_area_cachep;
429 
430 /*
431  * This linked list is used in tandem with free_vmap_area_root.
432  * It gives O(1) access to prev/next entries to perform fast coalescing.
433  */
434 static LIST_HEAD(free_vmap_area_list);
435 
436 /*
437  * This augmented red-black tree represents the free vmap space.
438  * All vmap_area objects in this tree are sorted by va->va_start
439  * address. It is used for allocation and for merging when a vmap
440  * object is released.
441  *
442  * Each vmap_area node stores the maximum available free block
443  * size of its sub-tree, left or right. Therefore it is possible
444  * to find the lowest matching free area.
445  */
446 static struct rb_root free_vmap_area_root = RB_ROOT;
447 
448 /*
449  * Preload a CPU with one object for the "no edge" split case. The
450  * aim is to get rid of allocations from atomic context, thus
451  * allowing more permissive allocation masks to be used.
452  */
453 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
454 
455 static __always_inline unsigned long
456 va_size(struct vmap_area *va)
457 {
458 	return (va->va_end - va->va_start);
459 }
460 
461 static __always_inline unsigned long
462 get_subtree_max_size(struct rb_node *node)
463 {
464 	struct vmap_area *va;
465 
466 	va = rb_entry_safe(node, struct vmap_area, rb_node);
467 	return va ? va->subtree_max_size : 0;
468 }
469 
470 /*
471  * Called when a node is removed from the tree or rotated.
472  */
473 static __always_inline unsigned long
474 compute_subtree_max_size(struct vmap_area *va)
475 {
476 	return max3(va_size(va),
477 		get_subtree_max_size(va->rb_node.rb_left),
478 		get_subtree_max_size(va->rb_node.rb_right));
479 }
480 
481 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
482 	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
483 
484 static void purge_vmap_area_lazy(void);
485 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
486 static unsigned long lazy_max_pages(void);
487 
488 static atomic_long_t nr_vmalloc_pages;
489 
490 unsigned long vmalloc_nr_pages(void)
491 {
492 	return atomic_long_read(&nr_vmalloc_pages);
493 }
494 
495 static struct vmap_area *__find_vmap_area(unsigned long addr)
496 {
497 	struct rb_node *n = vmap_area_root.rb_node;
498 
499 	while (n) {
500 		struct vmap_area *va;
501 
502 		va = rb_entry(n, struct vmap_area, rb_node);
503 		if (addr < va->va_start)
504 			n = n->rb_left;
505 		else if (addr >= va->va_end)
506 			n = n->rb_right;
507 		else
508 			return va;
509 	}
510 
511 	return NULL;
512 }
513 
514 /*
515  * This function returns the address of the parent node
516  * and of its left or right link for further processing.
517  *
518  * Otherwise NULL is returned. In that case all further
519  * steps to insert the conflicting, overlapping range
520  * must be declined; it is in fact considered a bug.
521  */
522 static __always_inline struct rb_node **
523 find_va_links(struct vmap_area *va,
524 	struct rb_root *root, struct rb_node *from,
525 	struct rb_node **parent)
526 {
527 	struct vmap_area *tmp_va;
528 	struct rb_node **link;
529 
530 	if (root) {
531 		link = &root->rb_node;
532 		if (unlikely(!*link)) {
533 			*parent = NULL;
534 			return link;
535 		}
536 	} else {
537 		link = &from;
538 	}
539 
540 	/*
541 	 * Descend to the bottom of the tree. When we reach the last
542 	 * node we end up with the parent rb_node and the direction,
543 	 * named "link", where the new va->rb_node will be attached.
544 	 */
545 	do {
546 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
547 
548 		/*
549 		 * During the traversal we also do some sanity checking.
550 		 * Trigger a WARN() if there are partial (left/right)
551 		 * or full overlaps.
552 		 */
553 		if (va->va_start < tmp_va->va_end &&
554 				va->va_end <= tmp_va->va_start)
555 			link = &(*link)->rb_left;
556 		else if (va->va_end > tmp_va->va_start &&
557 				va->va_start >= tmp_va->va_end)
558 			link = &(*link)->rb_right;
559 		else {
560 			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
561 				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
562 
563 			return NULL;
564 		}
565 	} while (*link);
566 
567 	*parent = &tmp_va->rb_node;
568 	return link;
569 }
570 
571 static __always_inline struct list_head *
572 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
573 {
574 	struct list_head *list;
575 
576 	if (unlikely(!parent))
577 		/*
578 		 * The red-black tree where we try to find VA neighbors
579 		 * before merging or inserting is empty, i.e. there is
580 		 * no free vmap space. Normally this does not happen,
581 		 * but we handle the case anyway.
582 		 */
583 		return NULL;
584 
585 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
586 	return (&parent->rb_right == link ? list->next : list);
587 }
588 
589 static __always_inline void
590 link_va(struct vmap_area *va, struct rb_root *root,
591 	struct rb_node *parent, struct rb_node **link, struct list_head *head)
592 {
593 	/*
594 	 * VA is not yet in the list, but we can
595 	 * identify its future previous list_head node.
596 	 */
597 	if (likely(parent)) {
598 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
599 		if (&parent->rb_right != link)
600 			head = head->prev;
601 	}
602 
603 	/* Insert to the rb-tree */
604 	rb_link_node(&va->rb_node, parent, link);
605 	if (root == &free_vmap_area_root) {
606 		/*
607 		 * Perform a simple insertion into the tree. We do not
608 		 * set va->subtree_max_size to its current size before
609 		 * calling rb_insert_augmented(), because we populate
610 		 * the tree from the bottom towards the parent levels
611 		 * only once the node _is_ in the tree.
612 		 *
613 		 * Therefore we set subtree_max_size to zero after insertion,
614 		 * letting __augment_tree_propagate_from() put everything in
615 		 * the correct order later on.
616 		 */
617 		rb_insert_augmented(&va->rb_node,
618 			root, &free_vmap_area_rb_augment_cb);
619 		va->subtree_max_size = 0;
620 	} else {
621 		rb_insert_color(&va->rb_node, root);
622 	}
623 
624 	/* Address-sort this list */
625 	list_add(&va->list, head);
626 }
627 
628 static __always_inline void
629 unlink_va(struct vmap_area *va, struct rb_root *root)
630 {
631 	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
632 		return;
633 
634 	if (root == &free_vmap_area_root)
635 		rb_erase_augmented(&va->rb_node,
636 			root, &free_vmap_area_rb_augment_cb);
637 	else
638 		rb_erase(&va->rb_node, root);
639 
640 	list_del(&va->list);
641 	RB_CLEAR_NODE(&va->rb_node);
642 }
643 
644 #if DEBUG_AUGMENT_PROPAGATE_CHECK
645 static void
646 augment_tree_propagate_check(void)
647 {
648 	struct vmap_area *va;
649 	unsigned long computed_size;
650 
651 	list_for_each_entry(va, &free_vmap_area_list, list) {
652 		computed_size = compute_subtree_max_size(va);
653 		if (computed_size != va->subtree_max_size)
654 			pr_emerg("tree is corrupted: %lu, %lu\n",
655 				va_size(va), va->subtree_max_size);
656 	}
657 }
658 #endif
659 
660 /*
661  * This function populates subtree_max_size from the bottom towards
662  * upper levels, starting from the VA point. The propagation must be
663  * done when the VA size is modified by changing va_start/va_end, or
664  * when a new VA is inserted into the tree.
665  *
666  * It means that __augment_tree_propagate_from() must be called:
667  * - after a VA has been inserted into the tree (free path);
668  * - after a VA has been shrunk (allocation path);
669  * - after a VA has been increased (merging path).
670  *
671  * Please note that this does not mean that the upper parent nodes
672  * and their subtree_max_size are recalculated all the way up
673  * to the root node.
674  *
675  *       4--8
676  *        /\
677  *       /  \
678  *      /    \
679  *    2--2  8--8
680  *
681  * For example, if we modify node 4, shrinking it to 2, then
682  * no modification is required at all. If we shrink node 2 to 1,
683  * only its subtree_max_size is updated and set to 1. If we shrink
684  * node 8 to 6, then its subtree_max_size is set to 6 and the parent
685  * node becomes 4--6.
686  */
687 static __always_inline void
688 augment_tree_propagate_from(struct vmap_area *va)
689 {
690 	/*
691 	 * Populate the tree from the bottom towards the root until
692 	 * the calculated maximum available size of a checked node
693 	 * is equal to its current one.
694 	 */
695 	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
696 
697 #if DEBUG_AUGMENT_PROPAGATE_CHECK
698 	augment_tree_propagate_check();
699 #endif
700 }
701 
702 static void
703 insert_vmap_area(struct vmap_area *va,
704 	struct rb_root *root, struct list_head *head)
705 {
706 	struct rb_node **link;
707 	struct rb_node *parent;
708 
709 	link = find_va_links(va, root, NULL, &parent);
710 	if (link)
711 		link_va(va, root, parent, link, head);
712 }
713 
714 static void
715 insert_vmap_area_augment(struct vmap_area *va,
716 	struct rb_node *from, struct rb_root *root,
717 	struct list_head *head)
718 {
719 	struct rb_node **link;
720 	struct rb_node *parent;
721 
722 	if (from)
723 		link = find_va_links(va, NULL, from, &parent);
724 	else
725 		link = find_va_links(va, root, NULL, &parent);
726 
727 	if (link) {
728 		link_va(va, root, parent, link, head);
729 		augment_tree_propagate_from(va);
730 	}
731 }
732 
733 /*
734  * Merge a de-allocated chunk of VA memory with the previous
735  * and next free blocks. If no coalescing is done, a new
736  * free area is inserted. If the VA has been merged, it is
737  * freed.
738  *
739  * Please note, it can return NULL for overlapping ranges,
740  * followed by a WARN() report. Despite this being buggy
741  * behaviour, the system can stay alive and keep
742  * going.
743  */
744 static __always_inline struct vmap_area *
745 merge_or_add_vmap_area(struct vmap_area *va,
746 	struct rb_root *root, struct list_head *head)
747 {
748 	struct vmap_area *sibling;
749 	struct list_head *next;
750 	struct rb_node **link;
751 	struct rb_node *parent;
752 	bool merged = false;
753 
754 	/*
755 	 * Find a place in the tree where VA potentially will be
756 	 * inserted, unless it is merged with its sibling/siblings.
757 	 */
758 	link = find_va_links(va, root, NULL, &parent);
759 	if (!link)
760 		return NULL;
761 
762 	/*
763 	 * Get next node of VA to check if merging can be done.
764 	 */
765 	next = get_va_next_sibling(parent, link);
766 	if (unlikely(next == NULL))
767 		goto insert;
768 
769 	/*
770 	 * start            end
771 	 * |                |
772 	 * |<------VA------>|<-----Next----->|
773 	 *                  |                |
774 	 *                  start            end
775 	 */
776 	if (next != head) {
777 		sibling = list_entry(next, struct vmap_area, list);
778 		if (sibling->va_start == va->va_end) {
779 			sibling->va_start = va->va_start;
780 
781 			/* Free vmap_area object. */
782 			kmem_cache_free(vmap_area_cachep, va);
783 
784 			/* Point to the new merged area. */
785 			va = sibling;
786 			merged = true;
787 		}
788 	}
789 
790 	/*
791 	 * start            end
792 	 * |                |
793 	 * |<-----Prev----->|<------VA------>|
794 	 *                  |                |
795 	 *                  start            end
796 	 */
797 	if (next->prev != head) {
798 		sibling = list_entry(next->prev, struct vmap_area, list);
799 		if (sibling->va_end == va->va_start) {
800 			/*
801 			 * If both neighbors are coalesced, it is important
802 			 * to unlink the "next" node first, followed by merging
803 			 * with "previous" one. Otherwise the tree might not be
804 			 * fully populated if a sibling's augmented value is
805 			 * "normalized" because of rotation operations.
806 			 */
807 			if (merged)
808 				unlink_va(va, root);
809 
810 			sibling->va_end = va->va_end;
811 
812 			/* Free vmap_area object. */
813 			kmem_cache_free(vmap_area_cachep, va);
814 
815 			/* Point to the new merged area. */
816 			va = sibling;
817 			merged = true;
818 		}
819 	}
820 
821 insert:
822 	if (!merged)
823 		link_va(va, root, parent, link, head);
824 
825 	/*
826 	 * Last step is to check and update the tree.
827 	 */
828 	augment_tree_propagate_from(va);
829 	return va;
830 }
831 
832 static __always_inline bool
833 is_within_this_va(struct vmap_area *va, unsigned long size,
834 	unsigned long align, unsigned long vstart)
835 {
836 	unsigned long nva_start_addr;
837 
838 	if (va->va_start > vstart)
839 		nva_start_addr = ALIGN(va->va_start, align);
840 	else
841 		nva_start_addr = ALIGN(vstart, align);
842 
843 	/* Can overflow due to a big size or alignment. */
844 	if (nva_start_addr + size < nva_start_addr ||
845 			nva_start_addr < vstart)
846 		return false;
847 
848 	return (nva_start_addr + size <= va->va_end);
849 }
850 
851 /*
852  * Find the first free block (lowest start address) in the tree
853  * that satisfies the request described by the passed
854  * parameters.
855  */
856 static __always_inline struct vmap_area *
857 find_vmap_lowest_match(unsigned long size,
858 	unsigned long align, unsigned long vstart)
859 {
860 	struct vmap_area *va;
861 	struct rb_node *node;
862 	unsigned long length;
863 
864 	/* Start from the root. */
865 	node = free_vmap_area_root.rb_node;
866 
867 	/* Adjust the search size for alignment overhead. */
868 	length = size + align - 1;
869 
870 	while (node) {
871 		va = rb_entry(node, struct vmap_area, rb_node);
872 
873 		if (get_subtree_max_size(node->rb_left) >= length &&
874 				vstart < va->va_start) {
875 			node = node->rb_left;
876 		} else {
877 			if (is_within_this_va(va, size, align, vstart))
878 				return va;
879 
880 			/*
881 			 * It does not make sense to go deeper into the right
882 			 * sub-tree if it does not have a free block that is
883 			 * equal to or bigger than the requested search length.
884 			 */
885 			if (get_subtree_max_size(node->rb_right) >= length) {
886 				node = node->rb_right;
887 				continue;
888 			}
889 
890 			/*
891 			 * OK. We roll back and find the first right sub-tree
892 			 * that satisfies the search criteria. This can happen
893 			 * only once due to the "vstart" restriction.
894 			 */
895 			while ((node = rb_parent(node))) {
896 				va = rb_entry(node, struct vmap_area, rb_node);
897 				if (is_within_this_va(va, size, align, vstart))
898 					return va;
899 
900 				if (get_subtree_max_size(node->rb_right) >= length &&
901 						vstart <= va->va_start) {
902 					node = node->rb_right;
903 					break;
904 				}
905 			}
906 		}
907 	}
908 
909 	return NULL;
910 }
911 
912 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
913 #include <linux/random.h>
914 
915 static struct vmap_area *
916 find_vmap_lowest_linear_match(unsigned long size,
917 	unsigned long align, unsigned long vstart)
918 {
919 	struct vmap_area *va;
920 
921 	list_for_each_entry(va, &free_vmap_area_list, list) {
922 		if (!is_within_this_va(va, size, align, vstart))
923 			continue;
924 
925 		return va;
926 	}
927 
928 	return NULL;
929 }
930 
931 static void
932 find_vmap_lowest_match_check(unsigned long size)
933 {
934 	struct vmap_area *va_1, *va_2;
935 	unsigned long vstart;
936 	unsigned int rnd;
937 
938 	get_random_bytes(&rnd, sizeof(rnd));
939 	vstart = VMALLOC_START + rnd;
940 
941 	va_1 = find_vmap_lowest_match(size, 1, vstart);
942 	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
943 
944 	if (va_1 != va_2)
945 		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
946 			va_1, va_2, vstart);
947 }
948 #endif
949 
950 enum fit_type {
951 	NOTHING_FIT = 0,
952 	FL_FIT_TYPE = 1,	/* full fit */
953 	LE_FIT_TYPE = 2,	/* left edge fit */
954 	RE_FIT_TYPE = 3,	/* right edge fit */
955 	NE_FIT_TYPE = 4		/* no edge fit */
956 };
957 
958 static __always_inline enum fit_type
959 classify_va_fit_type(struct vmap_area *va,
960 	unsigned long nva_start_addr, unsigned long size)
961 {
962 	enum fit_type type;
963 
964 	/* Check if it is within VA. */
965 	if (nva_start_addr < va->va_start ||
966 			nva_start_addr + size > va->va_end)
967 		return NOTHING_FIT;
968 
969 	/* Now classify. */
970 	if (va->va_start == nva_start_addr) {
971 		if (va->va_end == nva_start_addr + size)
972 			type = FL_FIT_TYPE;
973 		else
974 			type = LE_FIT_TYPE;
975 	} else if (va->va_end == nva_start_addr + size) {
976 		type = RE_FIT_TYPE;
977 	} else {
978 		type = NE_FIT_TYPE;
979 	}
980 
981 	return type;
982 }
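
/*
 * A worked example of the classification above; the addresses are made up
 * for illustration. Assume a free VA spanning [0x1000, 0x9000).
 */
#if 0	/* example only, not compiled */
static void example_fit_types(void)
{
	struct vmap_area va = { .va_start = 0x1000, .va_end = 0x9000 };

	/* The request takes the whole block. */
	WARN_ON(classify_va_fit_type(&va, 0x1000, 0x8000) != FL_FIT_TYPE);
	/* The request is carved from the left edge. */
	WARN_ON(classify_va_fit_type(&va, 0x1000, 0x2000) != LE_FIT_TYPE);
	/* The request is carved from the right edge. */
	WARN_ON(classify_va_fit_type(&va, 0x7000, 0x2000) != RE_FIT_TYPE);
	/* The request sits in the middle: both edges remain, a split is needed. */
	WARN_ON(classify_va_fit_type(&va, 0x3000, 0x2000) != NE_FIT_TYPE);
}
#endif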
983 
984 static __always_inline int
985 adjust_va_to_fit_type(struct vmap_area *va,
986 	unsigned long nva_start_addr, unsigned long size,
987 	enum fit_type type)
988 {
989 	struct vmap_area *lva = NULL;
990 
991 	if (type == FL_FIT_TYPE) {
992 		/*
993 		 * No need to split VA, it fully fits.
994 		 *
995 		 * |               |
996 		 * V      NVA      V
997 		 * |---------------|
998 		 */
999 		unlink_va(va, &free_vmap_area_root);
1000 		kmem_cache_free(vmap_area_cachep, va);
1001 	} else if (type == LE_FIT_TYPE) {
1002 		/*
1003 		 * Split left edge of fit VA.
1004 		 *
1005 		 * |       |
1006 		 * V  NVA  V   R
1007 		 * |-------|-------|
1008 		 */
1009 		va->va_start += size;
1010 	} else if (type == RE_FIT_TYPE) {
1011 		/*
1012 		 * Split right edge of fit VA.
1013 		 *
1014 		 *         |       |
1015 		 *     L   V  NVA  V
1016 		 * |-------|-------|
1017 		 */
1018 		va->va_end = nva_start_addr;
1019 	} else if (type == NE_FIT_TYPE) {
1020 		/*
1021 		 * Split no edge of fit VA.
1022 		 *
1023 		 *     |       |
1024 		 *   L V  NVA  V R
1025 		 * |---|-------|---|
1026 		 */
1027 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1028 		if (unlikely(!lva)) {
1029 			/*
1030 			 * For the percpu allocator we do not do any pre-allocation
1031 			 * and leave it as it is. The reason is that it most likely
1032 			 * never ends up with NE_FIT_TYPE splitting. For percpu
1033 			 * allocations, offsets and sizes are aligned to a fixed
1034 			 * align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1035 			 * are its main fitting cases.
1036 			 *
1037 			 * There are a few exceptions though; for example, the
1038 			 * first allocation (early boot-up), when we have "one"
1039 			 * big free space that has to be split.
1040 			 *
1041 			 * We can also hit this path for regular "vmap"
1042 			 * allocations, if "this" current CPU was not preloaded.
1043 			 * See the comment in alloc_vmap_area() for why. If so,
1044 			 * GFP_NOWAIT is used instead to get an extra object for
1045 			 * the split. That is rare and most of the time does not
1046 			 * occur.
1047 			 *
1048 			 * What happens if an allocation fails? Basically, an
1049 			 * "overflow" path is triggered to purge lazily freed
1050 			 * areas in order to free some memory, then the "retry"
1051 			 * path is triggered to repeat once more. See more
1052 			 * details in alloc_vmap_area().
1053 			 */
1054 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1055 			if (!lva)
1056 				return -1;
1057 		}
1058 
1059 		/*
1060 		 * Build the remainder.
1061 		 */
1062 		lva->va_start = va->va_start;
1063 		lva->va_end = nva_start_addr;
1064 
1065 		/*
1066 		 * Shrink this VA to remaining size.
1067 		 */
1068 		va->va_start = nva_start_addr + size;
1069 	} else {
1070 		return -1;
1071 	}
1072 
1073 	if (type != FL_FIT_TYPE) {
1074 		augment_tree_propagate_from(va);
1075 
1076 		if (lva)	/* type == NE_FIT_TYPE */
1077 			insert_vmap_area_augment(lva, &va->rb_node,
1078 				&free_vmap_area_root, &free_vmap_area_list);
1079 	}
1080 
1081 	return 0;
1082 }
1083 
1084 /*
1085  * Returns the start address of the newly allocated area on success.
1086  * Otherwise vend is returned to indicate failure.
1087  */
1088 static __always_inline unsigned long
1089 __alloc_vmap_area(unsigned long size, unsigned long align,
1090 	unsigned long vstart, unsigned long vend)
1091 {
1092 	unsigned long nva_start_addr;
1093 	struct vmap_area *va;
1094 	enum fit_type type;
1095 	int ret;
1096 
1097 	va = find_vmap_lowest_match(size, align, vstart);
1098 	if (unlikely(!va))
1099 		return vend;
1100 
1101 	if (va->va_start > vstart)
1102 		nva_start_addr = ALIGN(va->va_start, align);
1103 	else
1104 		nva_start_addr = ALIGN(vstart, align);
1105 
1106 	/* Check the "vend" restriction. */
1107 	if (nva_start_addr + size > vend)
1108 		return vend;
1109 
1110 	/* Classify what we have found. */
1111 	type = classify_va_fit_type(va, nva_start_addr, size);
1112 	if (WARN_ON_ONCE(type == NOTHING_FIT))
1113 		return vend;
1114 
1115 	/* Update the free vmap_area. */
1116 	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1117 	if (ret)
1118 		return vend;
1119 
1120 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1121 	find_vmap_lowest_match_check(size);
1122 #endif
1123 
1124 	return nva_start_addr;
1125 }
1126 
1127 /*
1128  * Free a region of KVA allocated by alloc_vmap_area
1129  */
1130 static void free_vmap_area(struct vmap_area *va)
1131 {
1132 	/*
1133 	 * Remove from the busy tree/list.
1134 	 */
1135 	spin_lock(&vmap_area_lock);
1136 	unlink_va(va, &vmap_area_root);
1137 	spin_unlock(&vmap_area_lock);
1138 
1139 	/*
1140 	 * Insert/Merge it back to the free tree/list.
1141 	 */
1142 	spin_lock(&free_vmap_area_lock);
1143 	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
1144 	spin_unlock(&free_vmap_area_lock);
1145 }
1146 
1147 /*
1148  * Allocate a region of KVA of the specified size and alignment, between
1149  * vstart and vend.
1150  */
1151 static struct vmap_area *alloc_vmap_area(unsigned long size,
1152 				unsigned long align,
1153 				unsigned long vstart, unsigned long vend,
1154 				int node, gfp_t gfp_mask)
1155 {
1156 	struct vmap_area *va, *pva;
1157 	unsigned long addr;
1158 	int purged = 0;
1159 	int ret;
1160 
1161 	BUG_ON(!size);
1162 	BUG_ON(offset_in_page(size));
1163 	BUG_ON(!is_power_of_2(align));
1164 
1165 	if (unlikely(!vmap_initialized))
1166 		return ERR_PTR(-EBUSY);
1167 
1168 	might_sleep();
1169 	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1170 
1171 	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1172 	if (unlikely(!va))
1173 		return ERR_PTR(-ENOMEM);
1174 
1175 	/*
1176 	 * Only scan the relevant parts containing pointers to other objects
1177 	 * to avoid false negatives.
1178 	 */
1179 	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1180 
1181 retry:
1182 	/*
1183 	 * Preload this CPU with one extra vmap_area object. It is used
1184 	 * when the fit type of the free area is NE_FIT_TYPE. Please note,
1185 	 * this does not guarantee that the allocation occurs on a CPU that
1186 	 * is preloaded; instead we minimize the cases when it is not.
1187 	 * It can happen because of CPU migration, since there is a
1188 	 * race until the spinlock below is taken.
1189 	 *
1190 	 * The preload is done in non-atomic context, thus allowing us
1191 	 * to use more permissive allocation masks and be more stable under
1192 	 * low memory conditions and high memory pressure. In the rare case
1193 	 * of not being preloaded, GFP_NOWAIT is used.
1194 	 *
1195 	 * Set "pva" to NULL here, because of the "retry" path.
1196 	 */
1197 	pva = NULL;
1198 
1199 	if (!this_cpu_read(ne_fit_preload_node))
1200 		/*
1201 		 * Even if it fails we do not really care about that.
1202 		 * Just proceed as it is. If needed, the "overflow" path
1203 		 * will refill the cache we allocate from.
1204 		 */
1205 		pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1206 
1207 	spin_lock(&free_vmap_area_lock);
1208 
1209 	if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
1210 		kmem_cache_free(vmap_area_cachep, pva);
1211 
1212 	/*
1213 	 * If the allocation fails, the "vend" address is
1214 	 * returned; in that case, trigger the overflow path.
1215 	 */
1216 	addr = __alloc_vmap_area(size, align, vstart, vend);
1217 	spin_unlock(&free_vmap_area_lock);
1218 
1219 	if (unlikely(addr == vend))
1220 		goto overflow;
1221 
1222 	va->va_start = addr;
1223 	va->va_end = addr + size;
1224 	va->vm = NULL;
1225 
1226 
1227 	spin_lock(&vmap_area_lock);
1228 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1229 	spin_unlock(&vmap_area_lock);
1230 
1231 	BUG_ON(!IS_ALIGNED(va->va_start, align));
1232 	BUG_ON(va->va_start < vstart);
1233 	BUG_ON(va->va_end > vend);
1234 
1235 	ret = kasan_populate_vmalloc(addr, size);
1236 	if (ret) {
1237 		free_vmap_area(va);
1238 		return ERR_PTR(ret);
1239 	}
1240 
1241 	return va;
1242 
1243 overflow:
1244 	if (!purged) {
1245 		purge_vmap_area_lazy();
1246 		purged = 1;
1247 		goto retry;
1248 	}
1249 
1250 	if (gfpflags_allow_blocking(gfp_mask)) {
1251 		unsigned long freed = 0;
1252 		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1253 		if (freed > 0) {
1254 			purged = 0;
1255 			goto retry;
1256 		}
1257 	}
1258 
1259 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1260 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1261 			size);
1262 
1263 	kmem_cache_free(vmap_area_cachep, va);
1264 	return ERR_PTR(-EBUSY);
1265 }
1266 
1267 int register_vmap_purge_notifier(struct notifier_block *nb)
1268 {
1269 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
1270 }
1271 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1272 
1273 int unregister_vmap_purge_notifier(struct notifier_block *nb)
1274 {
1275 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1276 }
1277 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
1278 
1279 /*
1280  * lazy_max_pages is the maximum amount of virtual address space we gather up
1281  * before attempting to purge with a TLB flush.
1282  *
1283  * There is a tradeoff here: a larger number will cover more kernel page tables
1284  * and take slightly longer to purge, but it will linearly reduce the number of
1285  * global TLB flushes that must be performed. It would seem natural to scale
1286  * this number up linearly with the number of CPUs (because vmapping activity
1287  * could also scale linearly with the number of CPUs), however it is likely
1288  * that in practice, workloads might be constrained in other ways that mean
1289  * vmap activity will not scale linearly with CPUs. Also, I want to be
1290  * conservative and not introduce a big latency on huge systems, so go with
1291  * a less aggressive log scale. It will still be an improvement over the old
1292  * code, and it will be simple to change the scale factor if we find that it
1293  * becomes a problem on bigger systems.
1294  */
1295 static unsigned long lazy_max_pages(void)
1296 {
1297 	unsigned int log;
1298 
1299 	log = fls(num_online_cpus());
1300 
1301 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1302 }
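
/*
 * Worked example (assuming 4K pages): 32MB is 8192 pages, and with 16
 * online CPUs fls(16) is 5, so up to 5 * 8192 = 40960 lazily freed pages
 * (160MB of KVA) can accumulate before a purge is triggered.
 */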
1303 
1304 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1305 
1306 /*
1307  * Serialize vmap purging.  There is no actual critical section protected
1308  * by this lock, but we want to avoid concurrent calls for performance
1309  * reasons and to make pcpu_get_vm_areas() more deterministic.
1310  */
1311 static DEFINE_MUTEX(vmap_purge_lock);
1312 
1313 /* for per-CPU blocks */
1314 static void purge_fragmented_blocks_allcpus(void);
1315 
1316 /*
1317  * Called before a call to iounmap() if the caller wants the vm_area_struct
1318  * freed immediately.
1319  */
1320 void set_iounmap_nonlazy(void)
1321 {
1322 	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1323 }
1324 
1325 /*
1326  * Purges all lazily-freed vmap areas.
1327  */
1328 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1329 {
1330 	unsigned long resched_threshold;
1331 	struct llist_node *valist;
1332 	struct vmap_area *va;
1333 	struct vmap_area *n_va;
1334 
1335 	lockdep_assert_held(&vmap_purge_lock);
1336 
1337 	valist = llist_del_all(&vmap_purge_list);
1338 	if (unlikely(valist == NULL))
1339 		return false;
1340 
1341 	/*
1342 	 * TODO: calculate the flush range without looping.
1343 	 * The list can be up to lazy_max_pages() elements.
1344 	 */
1345 	llist_for_each_entry(va, valist, purge_list) {
1346 		if (va->va_start < start)
1347 			start = va->va_start;
1348 		if (va->va_end > end)
1349 			end = va->va_end;
1350 	}
1351 
1352 	flush_tlb_kernel_range(start, end);
1353 	resched_threshold = lazy_max_pages() << 1;
1354 
1355 	spin_lock(&free_vmap_area_lock);
1356 	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
1357 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1358 		unsigned long orig_start = va->va_start;
1359 		unsigned long orig_end = va->va_end;
1360 
1361 		/*
1362 		 * Finally insert or merge lazily-freed area. It is
1363 		 * detached and there is no need to "unlink" it from
1364 		 * anything.
1365 		 */
1366 		va = merge_or_add_vmap_area(va, &free_vmap_area_root,
1367 					    &free_vmap_area_list);
1368 
1369 		if (!va)
1370 			continue;
1371 
1372 		if (is_vmalloc_or_module_addr((void *)orig_start))
1373 			kasan_release_vmalloc(orig_start, orig_end,
1374 					      va->va_start, va->va_end);
1375 
1376 		atomic_long_sub(nr, &vmap_lazy_nr);
1377 
1378 		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1379 			cond_resched_lock(&free_vmap_area_lock);
1380 	}
1381 	spin_unlock(&free_vmap_area_lock);
1382 	return true;
1383 }
1384 
1385 /*
1386  * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1387  * is already purging.
1388  */
1389 static void try_purge_vmap_area_lazy(void)
1390 {
1391 	if (mutex_trylock(&vmap_purge_lock)) {
1392 		__purge_vmap_area_lazy(ULONG_MAX, 0);
1393 		mutex_unlock(&vmap_purge_lock);
1394 	}
1395 }
1396 
1397 /*
1398  * Kick off a purge of the outstanding lazy areas.
1399  */
1400 static void purge_vmap_area_lazy(void)
1401 {
1402 	mutex_lock(&vmap_purge_lock);
1403 	purge_fragmented_blocks_allcpus();
1404 	__purge_vmap_area_lazy(ULONG_MAX, 0);
1405 	mutex_unlock(&vmap_purge_lock);
1406 }
1407 
1408 /*
1409  * Free a vmap area, the caller ensuring that the area has been unmapped
1410  * and that flush_cache_vunmap() has been called for the correct range
1411  * previously.
1412  */
1413 static void free_vmap_area_noflush(struct vmap_area *va)
1414 {
1415 	unsigned long nr_lazy;
1416 
1417 	spin_lock(&vmap_area_lock);
1418 	unlink_va(va, &vmap_area_root);
1419 	spin_unlock(&vmap_area_lock);
1420 
1421 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1422 				PAGE_SHIFT, &vmap_lazy_nr);
1423 
1424 	/* After this point, we may free va at any time */
1425 	llist_add(&va->purge_list, &vmap_purge_list);
1426 
1427 	if (unlikely(nr_lazy > lazy_max_pages()))
1428 		try_purge_vmap_area_lazy();
1429 }
1430 
1431 /*
1432  * Free and unmap a vmap area
1433  */
1434 static void free_unmap_vmap_area(struct vmap_area *va)
1435 {
1436 	flush_cache_vunmap(va->va_start, va->va_end);
1437 	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
1438 	if (debug_pagealloc_enabled_static())
1439 		flush_tlb_kernel_range(va->va_start, va->va_end);
1440 
1441 	free_vmap_area_noflush(va);
1442 }
1443 
1444 static struct vmap_area *find_vmap_area(unsigned long addr)
1445 {
1446 	struct vmap_area *va;
1447 
1448 	spin_lock(&vmap_area_lock);
1449 	va = __find_vmap_area(addr);
1450 	spin_unlock(&vmap_area_lock);
1451 
1452 	return va;
1453 }
1454 
1455 /*** Per cpu kva allocator ***/
1456 
1457 /*
1458  * vmap space is limited, especially on 32-bit architectures. Ensure there is
1459  * room for at least 16 percpu vmap blocks per CPU.
1460  */
1461 /*
1462  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1463  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
1464  * instead (we just need a rough idea)
1465  */
1466 #if BITS_PER_LONG == 32
1467 #define VMALLOC_SPACE		(128UL*1024*1024)
1468 #else
1469 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
1470 #endif
1471 
1472 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1473 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1474 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1475 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1476 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1477 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1478 #define VMAP_BBMAP_BITS		\
1479 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1480 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1481 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1482 
1483 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
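
/*
 * Worked example (assuming a 64-bit kernel, 4K pages and NR_CPUS == 64):
 * VMALLOC_PAGES = 128G / 4K = 33554432, and 33554432 / 64 / 16 = 32768,
 * which VMAP_MIN() clamps to VMAP_BBMAP_BITS_MAX = 1024 bits, giving a
 * VMAP_BLOCK_SIZE of 4MB.
 */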
1484 
1485 struct vmap_block_queue {
1486 	spinlock_t lock;
1487 	struct list_head free;
1488 };
1489 
1490 struct vmap_block {
1491 	spinlock_t lock;
1492 	struct vmap_area *va;
1493 	unsigned long free, dirty;
1494 	unsigned long dirty_min, dirty_max; /*< dirty range */
1495 	struct list_head free_list;
1496 	struct rcu_head rcu_head;
1497 	struct list_head purge;
1498 };
1499 
1500 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1501 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1502 
1503 /*
1504  * XArray of vmap blocks, indexed by address, to quickly find a vmap block
1505  * in the free path. Could get rid of this if we change the API to return a
1506  * "cookie" from alloc, to be passed to free. But no big deal yet.
1507  */
1508 static DEFINE_XARRAY(vmap_blocks);
1509 
1510 /*
1511  * We should probably have a fallback mechanism to allocate virtual memory
1512  * out of partially filled vmap blocks. However vmap block sizing should be
1513  * fairly reasonable according to the vmalloc size, so it shouldn't be a
1514  * big problem.
1515  */
1516 
1517 static unsigned long addr_to_vb_idx(unsigned long addr)
1518 {
1519 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1520 	addr /= VMAP_BLOCK_SIZE;
1521 	return addr;
1522 }
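
/*
 * Since vmap blocks are allocated VMAP_BLOCK_SIZE-aligned (see
 * new_vmap_block() below), all addresses within one block share the same
 * index, so any vaddr handed out by vb_alloc() maps back to its block.
 */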
1523 
1524 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1525 {
1526 	unsigned long addr;
1527 
1528 	addr = va_start + (pages_off << PAGE_SHIFT);
1529 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1530 	return (void *)addr;
1531 }
1532 
1533 /**
1534  * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in this
1535  *                  block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
1536  * @order:    2^order pages are occupied in the newly allocated block
1537  * @gfp_mask: flags for the page level allocator
1538  *
1539  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1540  */
1541 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1542 {
1543 	struct vmap_block_queue *vbq;
1544 	struct vmap_block *vb;
1545 	struct vmap_area *va;
1546 	unsigned long vb_idx;
1547 	int node, err;
1548 	void *vaddr;
1549 
1550 	node = numa_node_id();
1551 
1552 	vb = kmalloc_node(sizeof(struct vmap_block),
1553 			gfp_mask & GFP_RECLAIM_MASK, node);
1554 	if (unlikely(!vb))
1555 		return ERR_PTR(-ENOMEM);
1556 
1557 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1558 					VMALLOC_START, VMALLOC_END,
1559 					node, gfp_mask);
1560 	if (IS_ERR(va)) {
1561 		kfree(vb);
1562 		return ERR_CAST(va);
1563 	}
1564 
1565 	vaddr = vmap_block_vaddr(va->va_start, 0);
1566 	spin_lock_init(&vb->lock);
1567 	vb->va = va;
1568 	/* At least something should be left free */
1569 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1570 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1571 	vb->dirty = 0;
1572 	vb->dirty_min = VMAP_BBMAP_BITS;
1573 	vb->dirty_max = 0;
1574 	INIT_LIST_HEAD(&vb->free_list);
1575 
1576 	vb_idx = addr_to_vb_idx(va->va_start);
1577 	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
1578 	if (err) {
1579 		kfree(vb);
1580 		free_vmap_area(va);
1581 		return ERR_PTR(err);
1582 	}
1583 
1584 	vbq = &get_cpu_var(vmap_block_queue);
1585 	spin_lock(&vbq->lock);
1586 	list_add_tail_rcu(&vb->free_list, &vbq->free);
1587 	spin_unlock(&vbq->lock);
1588 	put_cpu_var(vmap_block_queue);
1589 
1590 	return vaddr;
1591 }
1592 
1593 static void free_vmap_block(struct vmap_block *vb)
1594 {
1595 	struct vmap_block *tmp;
1596 
1597 	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1598 	BUG_ON(tmp != vb);
1599 
1600 	free_vmap_area_noflush(vb->va);
1601 	kfree_rcu(vb, rcu_head);
1602 }
1603 
1604 static void purge_fragmented_blocks(int cpu)
1605 {
1606 	LIST_HEAD(purge);
1607 	struct vmap_block *vb;
1608 	struct vmap_block *n_vb;
1609 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1610 
1611 	rcu_read_lock();
1612 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1613 
1614 		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1615 			continue;
1616 
1617 		spin_lock(&vb->lock);
1618 		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1619 			vb->free = 0; /* prevent further allocs after releasing lock */
1620 			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1621 			vb->dirty_min = 0;
1622 			vb->dirty_max = VMAP_BBMAP_BITS;
1623 			spin_lock(&vbq->lock);
1624 			list_del_rcu(&vb->free_list);
1625 			spin_unlock(&vbq->lock);
1626 			spin_unlock(&vb->lock);
1627 			list_add_tail(&vb->purge, &purge);
1628 		} else
1629 			spin_unlock(&vb->lock);
1630 	}
1631 	rcu_read_unlock();
1632 
1633 	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1634 		list_del(&vb->purge);
1635 		free_vmap_block(vb);
1636 	}
1637 }
1638 
1639 static void purge_fragmented_blocks_allcpus(void)
1640 {
1641 	int cpu;
1642 
1643 	for_each_possible_cpu(cpu)
1644 		purge_fragmented_blocks(cpu);
1645 }
1646 
1647 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1648 {
1649 	struct vmap_block_queue *vbq;
1650 	struct vmap_block *vb;
1651 	void *vaddr = NULL;
1652 	unsigned int order;
1653 
1654 	BUG_ON(offset_in_page(size));
1655 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1656 	if (WARN_ON(size == 0)) {
1657 		/*
1658 		 * Allocating 0 bytes isn't what the caller wants, since
1659 		 * get_order(0) returns a funny result. Just warn and
1660 		 * terminate early.
1661 		 */
1662 		return NULL;
1663 	}
1664 	order = get_order(size);
1665 
1666 	rcu_read_lock();
1667 	vbq = &get_cpu_var(vmap_block_queue);
1668 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1669 		unsigned long pages_off;
1670 
1671 		spin_lock(&vb->lock);
1672 		if (vb->free < (1UL << order)) {
1673 			spin_unlock(&vb->lock);
1674 			continue;
1675 		}
1676 
1677 		pages_off = VMAP_BBMAP_BITS - vb->free;
1678 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1679 		vb->free -= 1UL << order;
1680 		if (vb->free == 0) {
1681 			spin_lock(&vbq->lock);
1682 			list_del_rcu(&vb->free_list);
1683 			spin_unlock(&vbq->lock);
1684 		}
1685 
1686 		spin_unlock(&vb->lock);
1687 		break;
1688 	}
1689 
1690 	put_cpu_var(vmap_block_queue);
1691 	rcu_read_unlock();
1692 
1693 	/* Allocate new block if nothing was found */
1694 	if (!vaddr)
1695 		vaddr = new_vmap_block(order, gfp_mask);
1696 
1697 	return vaddr;
1698 }
1699 
1700 static void vb_free(unsigned long addr, unsigned long size)
1701 {
1702 	unsigned long offset;
1703 	unsigned int order;
1704 	struct vmap_block *vb;
1705 
1706 	BUG_ON(offset_in_page(size));
1707 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1708 
1709 	flush_cache_vunmap(addr, addr + size);
1710 
1711 	order = get_order(size);
1712 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
1713 	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
1714 
1715 	unmap_kernel_range_noflush(addr, size);
1716 
1717 	if (debug_pagealloc_enabled_static())
1718 		flush_tlb_kernel_range(addr, addr + size);
1719 
1720 	spin_lock(&vb->lock);
1721 
1722 	/* Expand dirty range */
1723 	vb->dirty_min = min(vb->dirty_min, offset);
1724 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1725 
1726 	vb->dirty += 1UL << order;
1727 	if (vb->dirty == VMAP_BBMAP_BITS) {
1728 		BUG_ON(vb->free);
1729 		spin_unlock(&vb->lock);
1730 		free_vmap_block(vb);
1731 	} else
1732 		spin_unlock(&vb->lock);
1733 }
1734 
1735 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
1736 {
1737 	int cpu;
1738 
1739 	if (unlikely(!vmap_initialized))
1740 		return;
1741 
1742 	might_sleep();
1743 
1744 	for_each_possible_cpu(cpu) {
1745 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1746 		struct vmap_block *vb;
1747 
1748 		rcu_read_lock();
1749 		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1750 			spin_lock(&vb->lock);
1751 			if (vb->dirty) {
1752 				unsigned long va_start = vb->va->va_start;
1753 				unsigned long s, e;
1754 
1755 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
1756 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
1757 
1758 				start = min(s, start);
1759 				end   = max(e, end);
1760 
1761 				flush = 1;
1762 			}
1763 			spin_unlock(&vb->lock);
1764 		}
1765 		rcu_read_unlock();
1766 	}
1767 
1768 	mutex_lock(&vmap_purge_lock);
1769 	purge_fragmented_blocks_allcpus();
1770 	if (!__purge_vmap_area_lazy(start, end) && flush)
1771 		flush_tlb_kernel_range(start, end);
1772 	mutex_unlock(&vmap_purge_lock);
1773 }
1774 
1775 /**
1776  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1777  *
1778  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1779  * to amortize TLB flushing overheads. What this means is that any page you
1780  * have now may, in a former life, have been mapped into a kernel virtual
1781  * address by the vmap layer, and so there might be some CPUs with TLB entries
1782  * still referencing that page (in addition to the regular 1:1 kernel mapping).
1783  *
1784  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1785  * be sure that none of the pages we have control over will have any aliases
1786  * from the vmap layer.
1787  */
1788 void vm_unmap_aliases(void)
1789 {
1790 	unsigned long start = ULONG_MAX, end = 0;
1791 	int flush = 0;
1792 
1793 	_vm_unmap_aliases(start, end, flush);
1794 }
1795 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1796 
1797 /**
1798  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1799  * @mem: the pointer returned by vm_map_ram
1800  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1801  */
1802 void vm_unmap_ram(const void *mem, unsigned int count)
1803 {
1804 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1805 	unsigned long addr = (unsigned long)mem;
1806 	struct vmap_area *va;
1807 
1808 	might_sleep();
1809 	BUG_ON(!addr);
1810 	BUG_ON(addr < VMALLOC_START);
1811 	BUG_ON(addr > VMALLOC_END);
1812 	BUG_ON(!PAGE_ALIGNED(addr));
1813 
1814 	kasan_poison_vmalloc(mem, size);
1815 
1816 	if (likely(count <= VMAP_MAX_ALLOC)) {
1817 		debug_check_no_locks_freed(mem, size);
1818 		vb_free(addr, size);
1819 		return;
1820 	}
1821 
1822 	va = find_vmap_area(addr);
1823 	BUG_ON(!va);
1824 	debug_check_no_locks_freed((void *)va->va_start,
1825 				    (va->va_end - va->va_start));
1826 	free_unmap_vmap_area(va);
1827 }
1828 EXPORT_SYMBOL(vm_unmap_ram);
1829 
1830 /**
1831  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1832  * @pages: an array of pointers to the pages to be mapped
1833  * @count: number of pages
1834  * @node: prefer to allocate data structures on this node
1835  *
1836  * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
1837  * faster than vmap, so it's good.  But if you mix long-lived and short-lived
1838  * objects with vm_map_ram(), it could consume lots of address space through
1839  * fragmentation (especially on a 32-bit machine).  You could see failures in
1840  * the end.  Please use this function for short-lived objects.
1841  *
1842  * Returns: a pointer to the address that has been mapped, or %NULL on failure
1843  */
1844 void *vm_map_ram(struct page **pages, unsigned int count, int node)
1845 {
1846 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1847 	unsigned long addr;
1848 	void *mem;
1849 
1850 	if (likely(count <= VMAP_MAX_ALLOC)) {
1851 		mem = vb_alloc(size, GFP_KERNEL);
1852 		if (IS_ERR(mem))
1853 			return NULL;
1854 		addr = (unsigned long)mem;
1855 	} else {
1856 		struct vmap_area *va;
1857 		va = alloc_vmap_area(size, PAGE_SIZE,
1858 				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1859 		if (IS_ERR(va))
1860 			return NULL;
1861 
1862 		addr = va->va_start;
1863 		mem = (void *)addr;
1864 	}
1865 
1866 	kasan_unpoison_vmalloc(mem, size);
1867 
1868 	if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
1869 		vm_unmap_ram(mem, count);
1870 		return NULL;
1871 	}
1872 	return mem;
1873 }
1874 EXPORT_SYMBOL(vm_map_ram);
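
/*
 * Example (illustrative sketch): the intended short-lived use of
 * vm_map_ram()/vm_unmap_ram(). 'pages' and 'nr' are assumed to come
 * from the caller:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	// ... short-lived accesses through 'va' ...
 *	vm_unmap_ram(va, nr);	// must pass the same page count
 */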
1875 
1876 static struct vm_struct *vmlist __initdata;
1877 
1878 /**
1879  * vm_area_add_early - add vmap area early during boot
1880  * @vm: vm_struct to add
1881  *
1882  * This function is used to add fixed kernel vm area to vmlist before
1883  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
1884  * should contain proper values and the other fields should be zero.
1885  *
1886  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1887  */
1888 void __init vm_area_add_early(struct vm_struct *vm)
1889 {
1890 	struct vm_struct *tmp, **p;
1891 
1892 	BUG_ON(vmap_initialized);
1893 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1894 		if (tmp->addr >= vm->addr) {
1895 			BUG_ON(tmp->addr < vm->addr + vm->size);
1896 			break;
1897 		} else
1898 			BUG_ON(tmp->addr + tmp->size > vm->addr);
1899 	}
1900 	vm->next = *p;
1901 	*p = vm;
1902 }
1903 
1904 /**
1905  * vm_area_register_early - register vmap area early during boot
1906  * @vm: vm_struct to register
1907  * @align: requested alignment
1908  *
1909  * This function is used to register kernel vm area before
1910  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
1911  * proper values on entry and other fields should be zero.  On return,
1912  * vm->addr contains the allocated address.
1913  *
1914  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1915  */
1916 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1917 {
1918 	static size_t vm_init_off __initdata;
1919 	unsigned long addr;
1920 
1921 	addr = ALIGN(VMALLOC_START + vm_init_off, align);
1922 	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1923 
1924 	vm->addr = (void *)addr;
1925 
1926 	vm_area_add_early(vm);
1927 }
1928 
1929 static void vmap_init_free_space(void)
1930 {
1931 	unsigned long vmap_start = 1;
1932 	const unsigned long vmap_end = ULONG_MAX;
1933 	struct vmap_area *busy, *free;
1934 
1935 	/*
1936 	 *     B     F     B     B     B     F
1937 	 * -|-----|.....|-----|-----|-----|.....|-
1938 	 *  |           The KVA space           |
1939 	 *  |<--------------------------------->|
1940 	 */
1941 	list_for_each_entry(busy, &vmap_area_list, list) {
1942 		if (busy->va_start - vmap_start > 0) {
1943 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1944 			if (!WARN_ON_ONCE(!free)) {
1945 				free->va_start = vmap_start;
1946 				free->va_end = busy->va_start;
1947 
1948 				insert_vmap_area_augment(free, NULL,
1949 					&free_vmap_area_root,
1950 						&free_vmap_area_list);
1951 			}
1952 		}
1953 
1954 		vmap_start = busy->va_end;
1955 	}
1956 
1957 	if (vmap_end - vmap_start > 0) {
1958 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1959 		if (!WARN_ON_ONCE(!free)) {
1960 			free->va_start = vmap_start;
1961 			free->va_end = vmap_end;
1962 
1963 			insert_vmap_area_augment(free, NULL,
1964 				&free_vmap_area_root,
1965 					&free_vmap_area_list);
1966 		}
1967 	}
1968 }
1969 
1970 void __init vmalloc_init(void)
1971 {
1972 	struct vmap_area *va;
1973 	struct vm_struct *tmp;
1974 	int i;
1975 
1976 	/*
1977 	 * Create the cache for vmap_area objects.
1978 	 */
1979 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
1980 
1981 	for_each_possible_cpu(i) {
1982 		struct vmap_block_queue *vbq;
1983 		struct vfree_deferred *p;
1984 
1985 		vbq = &per_cpu(vmap_block_queue, i);
1986 		spin_lock_init(&vbq->lock);
1987 		INIT_LIST_HEAD(&vbq->free);
1988 		p = &per_cpu(vfree_deferred, i);
1989 		init_llist_head(&p->list);
1990 		INIT_WORK(&p->wq, free_work);
1991 	}
1992 
1993 	/* Import existing vmlist entries. */
1994 	for (tmp = vmlist; tmp; tmp = tmp->next) {
1995 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1996 		if (WARN_ON_ONCE(!va))
1997 			continue;
1998 
1999 		va->va_start = (unsigned long)tmp->addr;
2000 		va->va_end = va->va_start + tmp->size;
2001 		va->vm = tmp;
2002 		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2003 	}
2004 
2005 	/*
2006 	 * Now we can initialize a free vmap space.
2007 	 */
2008 	vmap_init_free_space();
2009 	vmap_initialized = true;
2010 }
2011 
2012 /**
2013  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
2014  * @addr: start of the VM area to unmap
2015  * @size: size of the VM area to unmap
2016  *
2017  * Similar to unmap_kernel_range_noflush(), but flushes the virtual cache
2018  * before the unmapping and the TLB after.
2019  */
2020 void unmap_kernel_range(unsigned long addr, unsigned long size)
2021 {
2022 	unsigned long end = addr + size;
2023 
2024 	flush_cache_vunmap(addr, end);
2025 	unmap_kernel_range_noflush(addr, size);
2026 	flush_tlb_kernel_range(addr, end);
2027 }
2028 
2029 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2030 	struct vmap_area *va, unsigned long flags, const void *caller)
2031 {
2032 	vm->flags = flags;
2033 	vm->addr = (void *)va->va_start;
2034 	vm->size = va->va_end - va->va_start;
2035 	vm->caller = caller;
2036 	va->vm = vm;
2037 }
2038 
2039 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2040 			      unsigned long flags, const void *caller)
2041 {
2042 	spin_lock(&vmap_area_lock);
2043 	setup_vmalloc_vm_locked(vm, va, flags, caller);
2044 	spin_unlock(&vmap_area_lock);
2045 }
2046 
2047 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2048 {
2049 	/*
2050 	 * Before removing VM_UNINITIALIZED,
2051 	 * we should make sure that vm has proper values.
2052 	 * Pair with smp_rmb() in show_numa_info().
2053 	 */
2054 	smp_wmb();
2055 	vm->flags &= ~VM_UNINITIALIZED;
2056 }
2057 
2058 static struct vm_struct *__get_vm_area_node(unsigned long size,
2059 		unsigned long align, unsigned long flags, unsigned long start,
2060 		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
2061 {
2062 	struct vmap_area *va;
2063 	struct vm_struct *area;
2064 	unsigned long requested_size = size;
2065 
2066 	BUG_ON(in_interrupt());
2067 	size = PAGE_ALIGN(size);
2068 	if (unlikely(!size))
2069 		return NULL;
2070 
2071 	if (flags & VM_IOREMAP)
2072 		align = 1ul << clamp_t(int, get_count_order_long(size),
2073 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2074 
2075 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2076 	if (unlikely(!area))
2077 		return NULL;
2078 
2079 	if (!(flags & VM_NO_GUARD))
2080 		size += PAGE_SIZE;
2081 
2082 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2083 	if (IS_ERR(va)) {
2084 		kfree(area);
2085 		return NULL;
2086 	}
2087 
2088 	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2089 
2090 	setup_vmalloc_vm(area, va, flags, caller);
2091 
2092 	return area;
2093 }
2094 
2095 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2096 				       unsigned long start, unsigned long end,
2097 				       const void *caller)
2098 {
2099 	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2100 				  GFP_KERNEL, caller);
2101 }
2102 
2103 /**
2104  * get_vm_area - reserve a contiguous kernel virtual area
2105  * @size:	 size of the area
2106  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
2107  *
2108  * Search for an area of @size in the kernel virtual mapping area,
2109  * and reserve it for our purposes.  Returns the area descriptor
2110  * on success or %NULL on failure.
2111  *
2112  * Return: the area descriptor on success or %NULL on failure.
2113  */
2114 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2115 {
2116 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2117 				  NUMA_NO_NODE, GFP_KERNEL,
2118 				  __builtin_return_address(0));
2119 }
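
/*
 * Example (illustrative sketch): reserving kernel virtual address
 * space without populating it, as an ioremap-style user would. The
 * reservation is undone with free_vm_area():
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *
 *	if (!area)
 *		return NULL;
 *	// area->addr now points at the reserved range (plus a guard
 *	// page); the caller typically maps device memory into it next
 */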
2120 
2121 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2122 				const void *caller)
2123 {
2124 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2125 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2126 }
2127 
2128 /**
2129  * find_vm_area - find a continuous kernel virtual area
2130  * @addr:	  base address
2131  *
2132  * Search for the kernel VM area starting at @addr, and return it.
2133  * It is up to the caller to do all required locking to keep the returned
2134  * pointer valid.
2135  *
2136  * Return: pointer to the found area or %NULL on failure
2137  */
2138 struct vm_struct *find_vm_area(const void *addr)
2139 {
2140 	struct vmap_area *va;
2141 
2142 	va = find_vmap_area((unsigned long)addr);
2143 	if (!va)
2144 		return NULL;
2145 
2146 	return va->vm;
2147 }
2148 
2149 /**
2150  * remove_vm_area - find and remove a continuous kernel virtual area
2151  * @addr:	    base address
2152  *
2153  * Search for the kernel VM area starting at @addr, and remove it.
2154  * This function returns the found VM area, but using it is NOT safe
2155  * on SMP machines, except for its size or flags.
2156  *
2157  * Return: pointer to the found area or %NULL on failure
2158  */
2159 struct vm_struct *remove_vm_area(const void *addr)
2160 {
2161 	struct vmap_area *va;
2162 
2163 	might_sleep();
2164 
2165 	spin_lock(&vmap_area_lock);
2166 	va = __find_vmap_area((unsigned long)addr);
2167 	if (va && va->vm) {
2168 		struct vm_struct *vm = va->vm;
2169 
2170 		va->vm = NULL;
2171 		spin_unlock(&vmap_area_lock);
2172 
2173 		kasan_free_shadow(vm);
2174 		free_unmap_vmap_area(va);
2175 
2176 		return vm;
2177 	}
2178 
2179 	spin_unlock(&vmap_area_lock);
2180 	return NULL;
2181 }
2182 
2183 static inline void set_area_direct_map(const struct vm_struct *area,
2184 				       int (*set_direct_map)(struct page *page))
2185 {
2186 	int i;
2187 
2188 	for (i = 0; i < area->nr_pages; i++)
2189 		if (page_address(area->pages[i]))
2190 			set_direct_map(area->pages[i]);
2191 }
2192 
2193 /* Handle removing and resetting vm mappings related to the vm_struct. */
2194 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2195 {
2196 	unsigned long start = ULONG_MAX, end = 0;
2197 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2198 	int flush_dmap = 0;
2199 	int i;
2200 
2201 	remove_vm_area(area->addr);
2202 
2203 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2204 	if (!flush_reset)
2205 		return;
2206 
2207 	/*
2208 	 * If not deallocating pages, just do the flush of the VM area and
2209 	 * return.
2210 	 */
2211 	if (!deallocate_pages) {
2212 		vm_unmap_aliases();
2213 		return;
2214 	}
2215 
2216 	/*
2217 	 * If execution gets here, flush the vm mapping and reset the direct
2218 	 * map. Find the start and end range of the direct mappings to make sure
2219 	 * the vm_unmap_aliases() flush includes the direct map.
2220 	 */
2221 	for (i = 0; i < area->nr_pages; i++) {
2222 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
2223 		if (addr) {
2224 			start = min(addr, start);
2225 			end = max(addr + PAGE_SIZE, end);
2226 			flush_dmap = 1;
2227 		}
2228 	}
2229 
2230 	/*
2231 	 * Set direct map to something invalid so that it won't be cached if
2232 	 * there are any accesses after the TLB flush, then flush the TLB and
2233 	 * reset the direct map permissions to the default.
2234 	 */
2235 	set_area_direct_map(area, set_direct_map_invalid_noflush);
2236 	_vm_unmap_aliases(start, end, flush_dmap);
2237 	set_area_direct_map(area, set_direct_map_default_noflush);
2238 }
2239 
2240 static void __vunmap(const void *addr, int deallocate_pages)
2241 {
2242 	struct vm_struct *area;
2243 
2244 	if (!addr)
2245 		return;
2246 
2247 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2248 			addr))
2249 		return;
2250 
2251 	area = find_vm_area(addr);
2252 	if (unlikely(!area)) {
2253 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2254 				addr);
2255 		return;
2256 	}
2257 
2258 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2259 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2260 
2261 	kasan_poison_vmalloc(area->addr, area->size);
2262 
2263 	vm_remove_mappings(area, deallocate_pages);
2264 
2265 	if (deallocate_pages) {
2266 		int i;
2267 
2268 		for (i = 0; i < area->nr_pages; i++) {
2269 			struct page *page = area->pages[i];
2270 
2271 			BUG_ON(!page);
2272 			__free_pages(page, 0);
2273 		}
2274 		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2275 
2276 		kvfree(area->pages);
2277 	}
2278 
2279 	kfree(area);
2280 	return;
2281 }
2282 
2283 static inline void __vfree_deferred(const void *addr)
2284 {
2285 	/*
2286 	 * Use raw_cpu_ptr() because this can be called from preemptible
2287 	 * context. Preemption is absolutely fine here, because the llist_add()
2288 	 * implementation is lockless, so it works even if we are adding to
2289 	 * another cpu's list. schedule_work() should be fine with this too.
2290 	 */
2291 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2292 
2293 	if (llist_add((struct llist_node *)addr, &p->list))
2294 		schedule_work(&p->wq);
2295 }
2296 
2297 /**
2298  * vfree_atomic - release memory allocated by vmalloc()
2299  * @addr:	  memory base address
2300  *
2301  * This one is just like vfree() but can be called in any atomic context
2302  * except NMIs.
2303  */
2304 void vfree_atomic(const void *addr)
2305 {
2306 	BUG_ON(in_nmi());
2307 
2308 	kmemleak_free(addr);
2309 
2310 	if (!addr)
2311 		return;
2312 	__vfree_deferred(addr);
2313 }
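
/*
 * Example (illustrative sketch): releasing a vmalloc'ed buffer while
 * holding a spinlock, where vfree() (which may sleep) is not allowed.
 * 'obj' and its fields are hypothetical:
 *
 *	spin_lock(&obj->lock);
 *	buf = obj->buf;
 *	obj->buf = NULL;
 *	vfree_atomic(buf);	// never sleeps, defers the real free
 *	spin_unlock(&obj->lock);
 */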
2314 
2315 static void __vfree(const void *addr)
2316 {
2317 	if (unlikely(in_interrupt()))
2318 		__vfree_deferred(addr);
2319 	else
2320 		__vunmap(addr, 1);
2321 }
2322 
2323 /**
2324  * vfree - release memory allocated by vmalloc()
2325  * @addr:  memory base address
2326  *
2327  * Free the virtually contiguous memory area starting at @addr, as
2328  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
2329  * NULL, no operation is performed.
2330  *
2331  * Must not be called in NMI context (strictly speaking, only if we don't
2332  * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2333  * conventions for vfree() arch-dependent would be a really bad idea)
2334  *
2335  * May sleep if called *not* from interrupt context.
2336  *
2337  * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
2338  */
2339 void vfree(const void *addr)
2340 {
2341 	BUG_ON(in_nmi());
2342 
2343 	kmemleak_free(addr);
2344 
2345 	might_sleep_if(!in_interrupt());
2346 
2347 	if (!addr)
2348 		return;
2349 
2350 	__vfree(addr);
2351 }
2352 EXPORT_SYMBOL(vfree);
2353 
2354 /**
2355  * vunmap - release virtual mapping obtained by vmap()
2356  * @addr:   memory base address
2357  *
2358  * Free the virtually contiguous memory area starting at @addr,
2359  * which was created from the page array passed to vmap().
2360  *
2361  * Must not be called in interrupt context.
2362  */
2363 void vunmap(const void *addr)
2364 {
2365 	BUG_ON(in_interrupt());
2366 	might_sleep();
2367 	if (addr)
2368 		__vunmap(addr, 0);
2369 }
2370 EXPORT_SYMBOL(vunmap);
2371 
2372 /**
2373  * vmap - map an array of pages into virtually contiguous space
2374  * @pages: array of page pointers
2375  * @count: number of pages to map
2376  * @flags: vm_area->flags
2377  * @prot: page protection for the mapping
2378  *
2379  * Maps @count pages from @pages into contiguous kernel virtual
2380  * space.
2381  *
2382  * Return: the address of the area or %NULL on failure
2383  */
2384 void *vmap(struct page **pages, unsigned int count,
2385 	   unsigned long flags, pgprot_t prot)
2386 {
2387 	struct vm_struct *area;
2388 	unsigned long size;		/* In bytes */
2389 
2390 	might_sleep();
2391 
2392 	if (count > totalram_pages())
2393 		return NULL;
2394 
2395 	size = (unsigned long)count << PAGE_SHIFT;
2396 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2397 	if (!area)
2398 		return NULL;
2399 
2400 	if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
2401 			pages) < 0) {
2402 		vunmap(area->addr);
2403 		return NULL;
2404 	}
2405 
2406 	return area->addr;
2407 }
2408 EXPORT_SYMBOL(vmap);
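
/*
 * Example (illustrative sketch): giving 'nr' individually allocated
 * pages one virtually contiguous view. The caller keeps the 'pages'
 * array so the pages themselves can be freed after vunmap():
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	// ... use the linear view at 'va' ...
 *	vunmap(va);	// releases the mapping, not the pages
 */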
2409 
2410 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2411 				 pgprot_t prot, int node)
2412 {
2413 	struct page **pages;
2414 	unsigned int nr_pages, array_size, i;
2415 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2416 	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
2417 	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
2418 					0 :
2419 					__GFP_HIGHMEM;
2420 
2421 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
2422 	array_size = (nr_pages * sizeof(struct page *));
2423 
2424 	/* Please note that the recursion is strictly bounded. */
2425 	if (array_size > PAGE_SIZE) {
2426 		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
2427 				node, area->caller);
2428 	} else {
2429 		pages = kmalloc_node(array_size, nested_gfp, node);
2430 	}
2431 
2432 	if (!pages) {
2433 		remove_vm_area(area->addr);
2434 		kfree(area);
2435 		return NULL;
2436 	}
2437 
2438 	area->pages = pages;
2439 	area->nr_pages = nr_pages;
2440 
2441 	for (i = 0; i < area->nr_pages; i++) {
2442 		struct page *page;
2443 
2444 		if (node == NUMA_NO_NODE)
2445 			page = alloc_page(alloc_mask|highmem_mask);
2446 		else
2447 			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
2448 
2449 		if (unlikely(!page)) {
2450 			/* Successfully allocated i pages, free them in __vunmap() */
2451 			area->nr_pages = i;
2452 			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2453 			goto fail;
2454 		}
2455 		area->pages[i] = page;
2456 		if (gfpflags_allow_blocking(gfp_mask))
2457 			cond_resched();
2458 	}
2459 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2460 
2461 	if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
2462 			prot, pages) < 0)
2463 		goto fail;
2464 
2465 	return area->addr;
2466 
2467 fail:
2468 	warn_alloc(gfp_mask, NULL,
2469 			  "vmalloc: allocation failure, allocated %ld of %ld bytes",
2470 			  (area->nr_pages*PAGE_SIZE), area->size);
2471 	__vfree(area->addr);
2472 	return NULL;
2473 }
2474 
2475 /**
2476  * __vmalloc_node_range - allocate virtually contiguous memory
2477  * @size:		  allocation size
2478  * @align:		  desired alignment
2479  * @start:		  vm area range start
2480  * @end:		  vm area range end
2481  * @gfp_mask:		  flags for the page level allocator
2482  * @prot:		  protection mask for the allocated pages
2483  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
2484  * @node:		  node to use for allocation or NUMA_NO_NODE
2485  * @caller:		  caller's return address
2486  *
2487  * Allocate enough pages to cover @size from the page level
2488  * allocator with @gfp_mask flags.  Map them into contiguous
2489  * kernel virtual space, using a pagetable protection of @prot.
2490  *
2491  * Return: the address of the area or %NULL on failure
2492  */
2493 void *__vmalloc_node_range(unsigned long size, unsigned long align,
2494 			unsigned long start, unsigned long end, gfp_t gfp_mask,
2495 			pgprot_t prot, unsigned long vm_flags, int node,
2496 			const void *caller)
2497 {
2498 	struct vm_struct *area;
2499 	void *addr;
2500 	unsigned long real_size = size;
2501 
2502 	size = PAGE_ALIGN(size);
2503 	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
2504 		goto fail;
2505 
2506 	area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
2507 				vm_flags, start, end, node, gfp_mask, caller);
2508 	if (!area)
2509 		goto fail;
2510 
2511 	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
2512 	if (!addr)
2513 		return NULL;
2514 
2515 	/*
2516 	 * The newly allocated vm_struct has the VM_UNINITIALIZED flag set,
2517 	 * meaning it is not fully initialized yet. At this point it is,
2518 	 * so remove the flag here.
2519 	 */
2520 	clear_vm_uninitialized_flag(area);
2521 
2522 	kmemleak_vmalloc(area, size, gfp_mask);
2523 
2524 	return addr;
2525 
2526 fail:
2527 	warn_alloc(gfp_mask, NULL,
2528 			  "vmalloc: allocation failure: %lu bytes", real_size);
2529 	return NULL;
2530 }
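
/*
 * Example (illustrative sketch): how a module_alloc()-style caller
 * allocates from a dedicated VA window instead of the general vmalloc
 * range; MODULES_VADDR/MODULES_END stand in for such an arch-defined
 * window:
 *
 *	p = __vmalloc_node_range(size, MODULE_ALIGN,
 *				 MODULES_VADDR, MODULES_END,
 *				 GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
 *				 NUMA_NO_NODE, __builtin_return_address(0));
 */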
2531 
2532 /**
2533  * __vmalloc_node - allocate virtually contiguous memory
2534  * @size:	    allocation size
2535  * @align:	    desired alignment
2536  * @gfp_mask:	    flags for the page level allocator
2537  * @node:	    node to use for allocation or NUMA_NO_NODE
2538  * @caller:	    caller's return address
2539  *
2540  * Allocate enough pages to cover @size from the page level allocator with
2541  * @gfp_mask flags.  Map them into contiguous kernel virtual space.
2542  *
2543  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2544  * and __GFP_NOFAIL are not supported
2545  *
2546  * Any use of gfp flags outside of GFP_KERNEL should be discussed
2547  * with the mm people first.
2548  *
2549  * Return: pointer to the allocated memory or %NULL on error
2550  */
2551 void *__vmalloc_node(unsigned long size, unsigned long align,
2552 			    gfp_t gfp_mask, int node, const void *caller)
2553 {
2554 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
2555 				gfp_mask, PAGE_KERNEL, 0, node, caller);
2556 }
2557 /*
2558  * This is only for performance analysis and stress testing of vmalloc.
2559  * It is required by the vmalloc test module; do not use it for anything
2560  * else.
2561  */
2562 #ifdef CONFIG_TEST_VMALLOC_MODULE
2563 EXPORT_SYMBOL_GPL(__vmalloc_node);
2564 #endif
2565 
2566 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
2567 {
2568 	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
2569 				__builtin_return_address(0));
2570 }
2571 EXPORT_SYMBOL(__vmalloc);
2572 
2573 /**
2574  * vmalloc - allocate virtually contiguous memory
2575  * @size:    allocation size
2576  *
2577  * Allocate enough pages to cover @size from the page level
2578  * allocator and map them into contiguous kernel virtual space.
2579  *
2580  * For tight control over page level allocator and protection flags
2581  * use __vmalloc() instead.
2582  *
2583  * Return: pointer to the allocated memory or %NULL on error
2584  */
2585 void *vmalloc(unsigned long size)
2586 {
2587 	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
2588 				__builtin_return_address(0));
2589 }
2590 EXPORT_SYMBOL(vmalloc);
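
/*
 * Example (illustrative sketch): the common pattern for a large table
 * that need not be physically contiguous; array_size() guards the
 * multiplication against overflow:
 *
 *	u32 *table = vmalloc(array_size(nr, sizeof(*table)));
 *
 *	if (!table)
 *		return -ENOMEM;
 *	// ... use table[0..nr-1] ...
 *	vfree(table);
 */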
2591 
2592 /**
2593  * vzalloc - allocate virtually contiguous memory with zero fill
2594  * @size:    allocation size
2595  *
2596  * Allocate enough pages to cover @size from the page level
2597  * allocator and map them into contiguous kernel virtual space.
2598  * The memory allocated is set to zero.
2599  *
2600  * For tight control over page level allocator and protection flags
2601  * use __vmalloc() instead.
2602  *
2603  * Return: pointer to the allocated memory or %NULL on error
2604  */
2605 void *vzalloc(unsigned long size)
2606 {
2607 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
2608 				__builtin_return_address(0));
2609 }
2610 EXPORT_SYMBOL(vzalloc);
2611 
2612 /**
2613  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2614  * @size: allocation size
2615  *
2616  * The resulting memory area is zeroed so it can be mapped to userspace
2617  * without leaking data.
2618  *
2619  * Return: pointer to the allocated memory or %NULL on error
2620  */
2621 void *vmalloc_user(unsigned long size)
2622 {
2623 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2624 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
2625 				    VM_USERMAP, NUMA_NO_NODE,
2626 				    __builtin_return_address(0));
2627 }
2628 EXPORT_SYMBOL(vmalloc_user);
2629 
2630 /**
2631  * vmalloc_node - allocate memory on a specific node
2632  * @size:	  allocation size
2633  * @node:	  numa node
2634  *
2635  * Allocate enough pages to cover @size from the page level
2636  * allocator and map them into contiguous kernel virtual space.
2637  *
2638  * For tight control over page level allocator and protection flags
2639  * use __vmalloc() instead.
2640  *
2641  * Return: pointer to the allocated memory or %NULL on error
2642  */
2643 void *vmalloc_node(unsigned long size, int node)
2644 {
2645 	return __vmalloc_node(size, 1, GFP_KERNEL, node,
2646 			__builtin_return_address(0));
2647 }
2648 EXPORT_SYMBOL(vmalloc_node);
2649 
2650 /**
2651  * vzalloc_node - allocate memory on a specific node with zero fill
2652  * @size:	allocation size
2653  * @node:	numa node
2654  *
2655  * Allocate enough pages to cover @size from the page level
2656  * allocator and map them into contiguous kernel virtual space.
2657  * The memory allocated is set to zero.
2658  *
2659  * Return: pointer to the allocated memory or %NULL on error
2660  */
2661 void *vzalloc_node(unsigned long size, int node)
2662 {
2663 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
2664 				__builtin_return_address(0));
2665 }
2666 EXPORT_SYMBOL(vzalloc_node);
2667 
2668 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2669 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2670 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2671 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
2672 #else
2673 /*
2674  * 64b systems should always have either DMA or DMA32 zones. For others
2675  * GFP_DMA32 should do the right thing and use the normal zone.
2676  */
2677 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2678 #endif
2679 
2680 /**
2681  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2682  * @size:	allocation size
2683  *
2684  * Allocate enough 32bit PA addressable pages to cover @size from the
2685  * page level allocator and map them into contiguous kernel virtual space.
2686  *
2687  * Return: pointer to the allocated memory or %NULL on error
2688  */
2689 void *vmalloc_32(unsigned long size)
2690 {
2691 	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
2692 			__builtin_return_address(0));
2693 }
2694 EXPORT_SYMBOL(vmalloc_32);
2695 
2696 /**
2697  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
2698  * @size:	     allocation size
2699  *
2700  * The resulting memory area is 32bit addressable and zeroed so it can be
2701  * mapped to userspace without leaking data.
2702  *
2703  * Return: pointer to the allocated memory or %NULL on error
2704  */
2705 void *vmalloc_32_user(unsigned long size)
2706 {
2707 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2708 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2709 				    VM_USERMAP, NUMA_NO_NODE,
2710 				    __builtin_return_address(0));
2711 }
2712 EXPORT_SYMBOL(vmalloc_32_user);
2713 
2714 /*
2715  * Small helper routine: copy contents from addr to buf.
2716  * If the page is not present, fill with zeroes.
2717  */
2718 
2719 static int aligned_vread(char *buf, char *addr, unsigned long count)
2720 {
2721 	struct page *p;
2722 	int copied = 0;
2723 
2724 	while (count) {
2725 		unsigned long offset, length;
2726 
2727 		offset = offset_in_page(addr);
2728 		length = PAGE_SIZE - offset;
2729 		if (length > count)
2730 			length = count;
2731 		p = vmalloc_to_page(addr);
2732 		/*
2733 		 * To do safe access to this _mapped_ area, we need a
2734 		 * lock. But adding a lock here means adding the overhead
2735 		 * of vmalloc()/vfree() calls to this rarely used _debug_
2736 		 * interface. Instead, we use kmap() and accept a small
2737 		 * overhead in this access function.
2738 		 */
2739 		if (p) {
2740 			/*
2741 			 * we can expect USER0 is not used (see vread/vwrite's
2742 			 * function description)
2743 			 */
2744 			void *map = kmap_atomic(p);
2745 			memcpy(buf, map + offset, length);
2746 			kunmap_atomic(map);
2747 		} else
2748 			memset(buf, 0, length);
2749 
2750 		addr += length;
2751 		buf += length;
2752 		copied += length;
2753 		count -= length;
2754 	}
2755 	return copied;
2756 }
2757 
2758 static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2759 {
2760 	struct page *p;
2761 	int copied = 0;
2762 
2763 	while (count) {
2764 		unsigned long offset, length;
2765 
2766 		offset = offset_in_page(addr);
2767 		length = PAGE_SIZE - offset;
2768 		if (length > count)
2769 			length = count;
2770 		p = vmalloc_to_page(addr);
2771 		/*
2772 		 * To do safe access to this _mapped_ area, we need
2773 		 * To do safe access to this _mapped_ area, we need a
2774 		 * lock. But adding a lock here means adding the overhead
2775 		 * of vmalloc()/vfree() calls to this rarely used _debug_
2776 		 * interface. Instead, we use kmap() and accept a small
2777 		 * overhead in this access function.
2778 		if (p) {
2779 			/*
2780 			 * we can expect USER0 is not used (see vread/vwrite's
2781 			 * function description)
2782 			 */
2783 			void *map = kmap_atomic(p);
2784 			memcpy(map + offset, buf, length);
2785 			kunmap_atomic(map);
2786 		}
2787 		addr += length;
2788 		buf += length;
2789 		copied += length;
2790 		count -= length;
2791 	}
2792 	return copied;
2793 }
2794 
2795 /**
2796  * vread() - read vmalloc area in a safe way.
2797  * @buf:     buffer for reading data
2798  * @addr:    vm address.
2799  * @count:   number of bytes to be read.
2800  *
2801  * This function checks that addr is a valid vmalloc'ed area, and
2802  * copies data from that area to a given buffer. If the given memory range
2803  * of [addr...addr+count) includes some valid address, data is copied to
2804  * the proper area of @buf. If there are memory holes, they'll be zero-filled.
2805  * An IOREMAP area is treated as a memory hole and no copy is done.
2806  *
2807  * If [addr...addr+count) doesn't include any intersection with a live
2808  * vm_struct area, returns 0. @buf should be a kernel buffer.
2809  *
2810  * Note: In usual operations, vread() is never necessary because the caller
2811  * should know the vmalloc() area is valid and can use memcpy().
2812  * This is for routines which have to access the vmalloc area without
2813  * any information, such as /dev/kmem.
2814  *
2815  * Return: number of bytes for which addr and buf should be increased
2816  * (same number as @count) or %0 if [addr...addr+count) doesn't
2817  * include any intersection with valid vmalloc area
2818  */
2819 long vread(char *buf, char *addr, unsigned long count)
2820 {
2821 	struct vmap_area *va;
2822 	struct vm_struct *vm;
2823 	char *vaddr, *buf_start = buf;
2824 	unsigned long buflen = count;
2825 	unsigned long n;
2826 
2827 	/* Don't allow overflow */
2828 	if ((unsigned long) addr + count < count)
2829 		count = -(unsigned long) addr;
2830 
2831 	spin_lock(&vmap_area_lock);
2832 	list_for_each_entry(va, &vmap_area_list, list) {
2833 		if (!count)
2834 			break;
2835 
2836 		if (!va->vm)
2837 			continue;
2838 
2839 		vm = va->vm;
2840 		vaddr = (char *) vm->addr;
2841 		if (addr >= vaddr + get_vm_area_size(vm))
2842 			continue;
2843 		while (addr < vaddr) {
2844 			if (count == 0)
2845 				goto finished;
2846 			*buf = '\0';
2847 			buf++;
2848 			addr++;
2849 			count--;
2850 		}
2851 		n = vaddr + get_vm_area_size(vm) - addr;
2852 		if (n > count)
2853 			n = count;
2854 		if (!(vm->flags & VM_IOREMAP))
2855 			aligned_vread(buf, addr, n);
2856 		else /* IOREMAP area is treated as memory hole */
2857 			memset(buf, 0, n);
2858 		buf += n;
2859 		addr += n;
2860 		count -= n;
2861 	}
2862 finished:
2863 	spin_unlock(&vmap_area_lock);
2864 
2865 	if (buf == buf_start)
2866 		return 0;
2867 	/* zero-fill memory holes */
2868 	if (buf != buf_start + buflen)
2869 		memset(buf, 0, buflen - (buf - buf_start));
2870 
2871 	return buflen;
2872 }
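
/*
 * Example (illustrative sketch): how a /dev/kmem-style reader copies
 * from an arbitrary kernel virtual range into a local buffer; holes
 * and ioremap regions come back zero-filled:
 *
 *	char *kbuf = kmalloc(len, GFP_KERNEL);
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	if (!vread(kbuf, (char *)addr, len))
 *		memset(kbuf, 0, len);	// no live vmalloc area was hit
 */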
2873 
2874 /**
2875  * vwrite() - write vmalloc area in a safe way.
2876  * @buf:      buffer for source data
2877  * @addr:     vm address.
2878  * @count:    number of bytes to be written.
2879  *
2880  * This function checks that addr is a valid vmalloc'ed area, and
2881  * copies data from a buffer to the given addr. If the specified range of
2882  * [addr...addr+count) includes some valid address, data is copied from
2883  * the proper area of @buf. If there are memory holes, no copy is done
2884  * for them. An IOREMAP area is treated as a memory hole as well.
2885  *
2886  * If [addr...addr+count) doesn't include any intersection with a live
2887  * vm_struct area, returns 0. @buf should be a kernel buffer.
2888  *
2889  * Note: In usual operations, vwrite() is never necessary because the caller
2890  * should know the vmalloc() area is valid and can use memcpy().
2891  * This is for routines which have to access the vmalloc area without
2892  * any information, such as /dev/kmem.
2893  *
2894  * Return: number of bytes for which addr and buf should be
2895  * increased (same number as @count) or %0 if [addr...addr+count)
2896  * doesn't include any intersection with valid vmalloc area
2897  */
2898 long vwrite(char *buf, char *addr, unsigned long count)
2899 {
2900 	struct vmap_area *va;
2901 	struct vm_struct *vm;
2902 	char *vaddr;
2903 	unsigned long n, buflen;
2904 	int copied = 0;
2905 
2906 	/* Don't allow overflow */
2907 	if ((unsigned long) addr + count < count)
2908 		count = -(unsigned long) addr;
2909 	buflen = count;
2910 
2911 	spin_lock(&vmap_area_lock);
2912 	list_for_each_entry(va, &vmap_area_list, list) {
2913 		if (!count)
2914 			break;
2915 
2916 		if (!va->vm)
2917 			continue;
2918 
2919 		vm = va->vm;
2920 		vaddr = (char *) vm->addr;
2921 		if (addr >= vaddr + get_vm_area_size(vm))
2922 			continue;
2923 		while (addr < vaddr) {
2924 			if (count == 0)
2925 				goto finished;
2926 			buf++;
2927 			addr++;
2928 			count--;
2929 		}
2930 		n = vaddr + get_vm_area_size(vm) - addr;
2931 		if (n > count)
2932 			n = count;
2933 		if (!(vm->flags & VM_IOREMAP)) {
2934 			aligned_vwrite(buf, addr, n);
2935 			copied++;
2936 		}
2937 		buf += n;
2938 		addr += n;
2939 		count -= n;
2940 	}
2941 finished:
2942 	spin_unlock(&vmap_area_lock);
2943 	if (!copied)
2944 		return 0;
2945 	return buflen;
2946 }
2947 
2948 /**
2949  * remap_vmalloc_range_partial - map vmalloc pages to userspace
2950  * @vma:		vma to cover
2951  * @uaddr:		target user address to start at
2952  * @kaddr:		virtual address of vmalloc kernel memory
2953  * @pgoff:		offset from @kaddr to start at
2954  * @size:		size of map area
2955  *
2956  * Returns:	0 for success, -Exxx on failure
2957  *
2958  * This function checks that @kaddr is a valid vmalloc'ed area,
2959  * and that it is big enough to cover the range starting at
2960  * @uaddr in @vma. Will return failure if that criterion isn't
2961  * met.
2962  *
2963  * Similar to remap_pfn_range() (see mm/memory.c)
2964  */
2965 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2966 				void *kaddr, unsigned long pgoff,
2967 				unsigned long size)
2968 {
2969 	struct vm_struct *area;
2970 	unsigned long off;
2971 	unsigned long end_index;
2972 
2973 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
2974 		return -EINVAL;
2975 
2976 	size = PAGE_ALIGN(size);
2977 
2978 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2979 		return -EINVAL;
2980 
2981 	area = find_vm_area(kaddr);
2982 	if (!area)
2983 		return -EINVAL;
2984 
2985 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
2986 		return -EINVAL;
2987 
2988 	if (check_add_overflow(size, off, &end_index) ||
2989 	    end_index > get_vm_area_size(area))
2990 		return -EINVAL;
2991 	kaddr += off;
2992 
2993 	do {
2994 		struct page *page = vmalloc_to_page(kaddr);
2995 		int ret;
2996 
2997 		ret = vm_insert_page(vma, uaddr, page);
2998 		if (ret)
2999 			return ret;
3000 
3001 		uaddr += PAGE_SIZE;
3002 		kaddr += PAGE_SIZE;
3003 		size -= PAGE_SIZE;
3004 	} while (size > 0);
3005 
3006 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3007 
3008 	return 0;
3009 }
3010 EXPORT_SYMBOL(remap_vmalloc_range_partial);
3011 
3012 /**
3013  * remap_vmalloc_range - map vmalloc pages to userspace
3014  * @vma:		vma to cover (map full range of vma)
3015  * @addr:		vmalloc memory
3016  * @pgoff:		number of pages into addr before first page to map
3017  *
3018  * Returns:	0 for success, -Exxx on failure
3019  *
3020  * This function checks that addr is a valid vmalloc'ed area, and
3021  * that it is big enough to cover the vma. Will return failure if
3022  * that criterion isn't met.
3023  *
3024  * Similar to remap_pfn_range() (see mm/memory.c)
3025  */
3026 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3027 						unsigned long pgoff)
3028 {
3029 	return remap_vmalloc_range_partial(vma, vma->vm_start,
3030 					   addr, pgoff,
3031 					   vma->vm_end - vma->vm_start);
3032 }
3033 EXPORT_SYMBOL(remap_vmalloc_range);
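
/*
 * Example (illustrative sketch): exposing a buffer to userspace from
 * a driver's ->mmap() handler. The buffer must have been allocated
 * with vmalloc_user(), which sets the VM_USERMAP flag required above;
 * 'mydrv' is hypothetical:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->vbuf, vma->vm_pgoff);
 *	}
 */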
3034 
3035 static int f(pte_t *pte, unsigned long addr, void *data)
3036 {
3037 	pte_t ***p = data;
3038 
3039 	if (p) {
3040 		*(*p) = pte;
3041 		(*p)++;
3042 	}
3043 	return 0;
3044 }
3045 
3046 /**
3047  * alloc_vm_area - allocate a range of kernel address space
3048  * @size:	   size of the area
3049  * @ptes:	   returns the PTEs for the address space
3050  *
3051  * Returns:	NULL on failure, vm_struct on success
3052  *
3053  * This function reserves a range of kernel address space, and
3054  * allocates pagetables to map that range.  No actual mappings
3055  * are created.
3056  *
3057  * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
3058  * allocated for the VM area are returned.
3059  */
3060 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
3061 {
3062 	struct vm_struct *area;
3063 
3064 	area = get_vm_area_caller(size, VM_IOREMAP,
3065 				__builtin_return_address(0));
3066 	if (area == NULL)
3067 		return NULL;
3068 
3069 	/*
3070 	 * This ensures that page tables are constructed for this region
3071 	 * of kernel virtual address space and mapped into init_mm.
3072 	 */
3073 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3074 				size, f, ptes ? &ptes : NULL)) {
3075 		free_vm_area(area);
3076 		return NULL;
3077 	}
3078 
3079 	return area;
3080 }
3081 EXPORT_SYMBOL_GPL(alloc_vm_area);
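
/*
 * Example (illustrative sketch): the Xen grant-table style of usage,
 * where the PTE slots are built up front so foreign memory can be
 * wired into them later. NR_GRANT_PAGES is a hypothetical constant:
 *
 *	pte_t *ptes[NR_GRANT_PAGES];
 *	struct vm_struct *area;
 *
 *	area = alloc_vm_area(NR_GRANT_PAGES * PAGE_SIZE, ptes);
 *	if (!area)
 *		return -ENOMEM;
 *	// ptes[i] now points at the (still empty) PTE for the i-th
 *	// page of area->addr; tear down with free_vm_area() when done
 */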
3082 
3083 void free_vm_area(struct vm_struct *area)
3084 {
3085 	struct vm_struct *ret;
3086 	ret = remove_vm_area(area->addr);
3087 	BUG_ON(ret != area);
3088 	kfree(area);
3089 }
3090 EXPORT_SYMBOL_GPL(free_vm_area);
3091 
3092 #ifdef CONFIG_SMP
3093 static struct vmap_area *node_to_va(struct rb_node *n)
3094 {
3095 	return rb_entry_safe(n, struct vmap_area, rb_node);
3096 }
3097 
3098 /**
3099  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3100  * @addr: target address
3101  *
3102  * Returns: the vmap_area containing @addr, if one is found. Otherwise
3103  *   the highest vmap_area below @addr is returned (reverse order),
3104  *   i.e. one with va->va_start < addr && va->va_end < addr, or NULL
3105  *   if there is no area before @addr.
3106  */
3107 static struct vmap_area *
3108 pvm_find_va_enclose_addr(unsigned long addr)
3109 {
3110 	struct vmap_area *va, *tmp;
3111 	struct rb_node *n;
3112 
3113 	n = free_vmap_area_root.rb_node;
3114 	va = NULL;
3115 
3116 	while (n) {
3117 		tmp = rb_entry(n, struct vmap_area, rb_node);
3118 		if (tmp->va_start <= addr) {
3119 			va = tmp;
3120 			if (tmp->va_end >= addr)
3121 				break;
3122 
3123 			n = n->rb_right;
3124 		} else {
3125 			n = n->rb_left;
3126 		}
3127 	}
3128 
3129 	return va;
3130 }
3131 
3132 /**
3133  * pvm_determine_end_from_reverse - find the highest aligned address
3134  * of a free block below VMALLOC_END
3135  * @va:
3136  *   in - the VA we start the search from (reverse order);
3137  *   out - the VA with the highest aligned end address.
3138  *
3139  * Returns: determined end address within vmap_area
3140  */
3141 static unsigned long
3142 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3143 {
3144 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3145 	unsigned long addr;
3146 
3147 	if (likely(*va)) {
3148 		list_for_each_entry_from_reverse((*va),
3149 				&free_vmap_area_list, list) {
3150 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3151 			if ((*va)->va_start < addr)
3152 				return addr;
3153 		}
3154 	}
3155 
3156 	return 0;
3157 }
3158 
3159 /**
3160  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3161  * @offsets: array containing offset of each area
3162  * @sizes: array containing size of each area
3163  * @nr_vms: the number of areas to allocate
3164  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3165  *
3166  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3167  *	    vm_structs on success, %NULL on failure
3168  *
3169  * Percpu allocator wants to use congruent vm areas so that it can
3170  * maintain the offsets among percpu areas.  This function allocates
3171  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3172  * be scattered pretty far, distance between two areas easily going up
3173  * to gigabytes.  To avoid interacting with regular vmallocs, these
3174  * areas are allocated from the top.
3175  *
3176  * Despite its complicated look, this allocator is rather simple. It
3177  * does everything top-down and scans free blocks from the end looking
3178  * for a matching base. While scanning, if any of the areas does not fit,
3179  * the base address is pulled down to fit the area. Scanning is repeated until
3180  * all the areas fit, and then all necessary data structures are inserted
3181  * and the result is returned.
3182  */
3183 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3184 				     const size_t *sizes, int nr_vms,
3185 				     size_t align)
3186 {
3187 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3188 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3189 	struct vmap_area **vas, *va;
3190 	struct vm_struct **vms;
3191 	int area, area2, last_area, term_area;
3192 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
3193 	bool purged = false;
3194 	enum fit_type type;
3195 
3196 	/* verify parameters and allocate data structures */
3197 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3198 	for (last_area = 0, area = 0; area < nr_vms; area++) {
3199 		start = offsets[area];
3200 		end = start + sizes[area];
3201 
3202 		/* is everything aligned properly? */
3203 		BUG_ON(!IS_ALIGNED(offsets[area], align));
3204 		BUG_ON(!IS_ALIGNED(sizes[area], align));
3205 
3206 		/* detect the area with the highest address */
3207 		if (start > offsets[last_area])
3208 			last_area = area;
3209 
3210 		for (area2 = area + 1; area2 < nr_vms; area2++) {
3211 			unsigned long start2 = offsets[area2];
3212 			unsigned long end2 = start2 + sizes[area2];
3213 
3214 			BUG_ON(start2 < end && start < end2);
3215 		}
3216 	}
3217 	last_end = offsets[last_area] + sizes[last_area];
3218 
3219 	if (vmalloc_end - vmalloc_start < last_end) {
3220 		WARN_ON(true);
3221 		return NULL;
3222 	}
3223 
3224 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3225 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3226 	if (!vas || !vms)
3227 		goto err_free2;
3228 
3229 	for (area = 0; area < nr_vms; area++) {
3230 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3231 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3232 		if (!vas[area] || !vms[area])
3233 			goto err_free;
3234 	}
3235 retry:
3236 	spin_lock(&free_vmap_area_lock);
3237 
3238 	/* start scanning - we scan from the top, begin with the last area */
3239 	area = term_area = last_area;
3240 	start = offsets[area];
3241 	end = start + sizes[area];
3242 
3243 	va = pvm_find_va_enclose_addr(vmalloc_end);
3244 	base = pvm_determine_end_from_reverse(&va, align) - end;
3245 
3246 	while (true) {
3247 		/*
3248 		 * base might have underflowed, add last_end before
3249 		 * comparing.
3250 		 */
3251 		if (base + last_end < vmalloc_start + last_end)
3252 			goto overflow;
3253 
3254 		/*
3255 		 * Fitting base has not been found.
3256 		 */
3257 		if (va == NULL)
3258 			goto overflow;
3259 
3260 		/*
3261 		 * If required width exceeds current VA block, move
3262 		 * base downwards and then recheck.
3263 		 */
3264 		if (base + end > va->va_end) {
3265 			base = pvm_determine_end_from_reverse(&va, align) - end;
3266 			term_area = area;
3267 			continue;
3268 		}
3269 
3270 		/*
3271 		 * If this VA does not fit, move base downwards and recheck.
3272 		 */
3273 		if (base + start < va->va_start) {
3274 			va = node_to_va(rb_prev(&va->rb_node));
3275 			base = pvm_determine_end_from_reverse(&va, align) - end;
3276 			term_area = area;
3277 			continue;
3278 		}
3279 
3280 		/*
3281 		 * This area fits, move on to the previous one.  If
3282 		 * the previous one is the terminal one, we're done.
3283 		 */
3284 		area = (area + nr_vms - 1) % nr_vms;
3285 		if (area == term_area)
3286 			break;
3287 
3288 		start = offsets[area];
3289 		end = start + sizes[area];
3290 		va = pvm_find_va_enclose_addr(base + end);
3291 	}
3292 
3293 	/* we've found a fitting base, insert all va's */
3294 	for (area = 0; area < nr_vms; area++) {
3295 		int ret;
3296 
3297 		start = base + offsets[area];
3298 		size = sizes[area];
3299 
3300 		va = pvm_find_va_enclose_addr(start);
3301 		if (WARN_ON_ONCE(va == NULL))
3302 			/* It is a BUG(), but trigger recovery instead. */
3303 			goto recovery;
3304 
3305 		type = classify_va_fit_type(va, start, size);
3306 		if (WARN_ON_ONCE(type == NOTHING_FIT))
3307 			/* It is a BUG(), but trigger recovery instead. */
3308 			goto recovery;
3309 
3310 		ret = adjust_va_to_fit_type(va, start, size, type);
3311 		if (unlikely(ret))
3312 			goto recovery;
3313 
3314 		/* Allocated area. */
3315 		va = vas[area];
3316 		va->va_start = start;
3317 		va->va_end = start + size;
3318 	}
3319 
3320 	spin_unlock(&free_vmap_area_lock);
3321 
3322 	/* populate the kasan shadow space */
3323 	for (area = 0; area < nr_vms; area++) {
3324 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3325 			goto err_free_shadow;
3326 
3327 		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3328 				       sizes[area]);
3329 	}
3330 
3331 	/* insert all vm's */
3332 	spin_lock(&vmap_area_lock);
3333 	for (area = 0; area < nr_vms; area++) {
3334 		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3335 
3336 		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3337 				 pcpu_get_vm_areas);
3338 	}
3339 	spin_unlock(&vmap_area_lock);
3340 
3341 	kfree(vas);
3342 	return vms;
3343 
3344 recovery:
3345 	/*
3346 	 * Remove previously allocated areas. There is no
3347 	 * need to remove these areas from the busy tree,
3348 	 * because they are inserted only on the final step,
3349 	 * and only when pcpu_get_vm_areas() succeeds.
3350 	 */
3351 	while (area--) {
3352 		orig_start = vas[area]->va_start;
3353 		orig_end = vas[area]->va_end;
3354 		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3355 					    &free_vmap_area_list);
3356 		if (va)
3357 			kasan_release_vmalloc(orig_start, orig_end,
3358 				va->va_start, va->va_end);
3359 		vas[area] = NULL;
3360 	}
3361 
3362 overflow:
3363 	spin_unlock(&free_vmap_area_lock);
3364 	if (!purged) {
3365 		purge_vmap_area_lazy();
3366 		purged = true;
3367 
3368 		/* Before "retry", check if we recover. */
3369 		for (area = 0; area < nr_vms; area++) {
3370 			if (vas[area])
3371 				continue;
3372 
3373 			vas[area] = kmem_cache_zalloc(
3374 				vmap_area_cachep, GFP_KERNEL);
3375 			if (!vas[area])
3376 				goto err_free;
3377 		}
3378 
3379 		goto retry;
3380 	}
3381 
3382 err_free:
3383 	for (area = 0; area < nr_vms; area++) {
3384 		if (vas[area])
3385 			kmem_cache_free(vmap_area_cachep, vas[area]);
3386 
3387 		kfree(vms[area]);
3388 	}
3389 err_free2:
3390 	kfree(vas);
3391 	kfree(vms);
3392 	return NULL;
3393 
3394 err_free_shadow:
3395 	spin_lock(&free_vmap_area_lock);
3396 	/*
3397 	 * We release all the vmalloc shadows, even the ones for regions that
3398 	 * hadn't been successfully added. This relies on kasan_release_vmalloc
3399 	 * being able to tolerate this case.
3400 	 */
3401 	for (area = 0; area < nr_vms; area++) {
3402 		orig_start = vas[area]->va_start;
3403 		orig_end = vas[area]->va_end;
3404 		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3405 					    &free_vmap_area_list);
3406 		if (va)
3407 			kasan_release_vmalloc(orig_start, orig_end,
3408 				va->va_start, va->va_end);
3409 		vas[area] = NULL;
3410 		kfree(vms[area]);
3411 	}
3412 	spin_unlock(&free_vmap_area_lock);
3413 	kfree(vas);
3414 	kfree(vms);
3415 	return NULL;
3416 }
3417 
3418 /**
3419  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3420  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3421  * @nr_vms: the number of allocated areas
3422  *
3423  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3424  */
3425 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3426 {
3427 	int i;
3428 
3429 	for (i = 0; i < nr_vms; i++)
3430 		free_vm_area(vms[i]);
3431 	kfree(vms);
3432 }
3433 #endif	/* CONFIG_SMP */
3434 
3435 #ifdef CONFIG_PROC_FS
3436 static void *s_start(struct seq_file *m, loff_t *pos)
3437 	__acquires(&vmap_purge_lock)
3438 	__acquires(&vmap_area_lock)
3439 {
3440 	mutex_lock(&vmap_purge_lock);
3441 	spin_lock(&vmap_area_lock);
3442 
3443 	return seq_list_start(&vmap_area_list, *pos);
3444 }
3445 
3446 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3447 {
3448 	return seq_list_next(p, &vmap_area_list, pos);
3449 }
3450 
3451 static void s_stop(struct seq_file *m, void *p)
3452 	__releases(&vmap_purge_lock)
3453 	__releases(&vmap_area_lock)
3454 {
3455 	mutex_unlock(&vmap_purge_lock);
3456 	spin_unlock(&vmap_area_lock);
3457 }
3458 
3459 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3460 {
3461 	if (IS_ENABLED(CONFIG_NUMA)) {
3462 		unsigned int nr, *counters = m->private;
3463 
3464 		if (!counters)
3465 			return;
3466 
3467 		if (v->flags & VM_UNINITIALIZED)
3468 			return;
3469 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3470 		smp_rmb();
3471 
3472 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3473 
3474 		for (nr = 0; nr < v->nr_pages; nr++)
3475 			counters[page_to_nid(v->pages[nr])]++;
3476 
3477 		for_each_node_state(nr, N_HIGH_MEMORY)
3478 			if (counters[nr])
3479 				seq_printf(m, " N%u=%u", nr, counters[nr]);
3480 	}
3481 }
3482 
3483 static void show_purge_info(struct seq_file *m)
3484 {
3485 	struct llist_node *head;
3486 	struct vmap_area *va;
3487 
3488 	head = READ_ONCE(vmap_purge_list.first);
3489 	if (head == NULL)
3490 		return;
3491 
3492 	llist_for_each_entry(va, head, purge_list) {
3493 		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3494 			(void *)va->va_start, (void *)va->va_end,
3495 			va->va_end - va->va_start);
3496 	}
3497 }
3498 
3499 static int s_show(struct seq_file *m, void *p)
3500 {
3501 	struct vmap_area *va;
3502 	struct vm_struct *v;
3503 
3504 	va = list_entry(p, struct vmap_area, list);
3505 
3506 	/*
3507 	 * s_show can race with remove_vm_area(): !va->vm means the vmap
3508 	 * area is being torn down, or it is a vm_map_ram allocation.
3509 	 */
3510 	if (!va->vm) {
3511 		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3512 			(void *)va->va_start, (void *)va->va_end,
3513 			va->va_end - va->va_start);
3514 
3515 		return 0;
3516 	}
3517 
3518 	v = va->vm;
3519 
3520 	seq_printf(m, "0x%pK-0x%pK %7ld",
3521 		v->addr, v->addr + v->size, v->size);
3522 
3523 	if (v->caller)
3524 		seq_printf(m, " %pS", v->caller);
3525 
3526 	if (v->nr_pages)
3527 		seq_printf(m, " pages=%d", v->nr_pages);
3528 
3529 	if (v->phys_addr)
3530 		seq_printf(m, " phys=%pa", &v->phys_addr);
3531 
3532 	if (v->flags & VM_IOREMAP)
3533 		seq_puts(m, " ioremap");
3534 
3535 	if (v->flags & VM_ALLOC)
3536 		seq_puts(m, " vmalloc");
3537 
3538 	if (v->flags & VM_MAP)
3539 		seq_puts(m, " vmap");
3540 
3541 	if (v->flags & VM_USERMAP)
3542 		seq_puts(m, " user");
3543 
3544 	if (v->flags & VM_DMA_COHERENT)
3545 		seq_puts(m, " dma-coherent");
3546 
3547 	if (is_vmalloc_addr(v->pages))
3548 		seq_puts(m, " vpages");
3549 
3550 	show_numa_info(m, v);
3551 	seq_putc(m, '\n');
3552 
3553 	/*
3554 	 * As a final step, dump "unpurged" areas. Note that
3555 	 * the entire "/proc/vmallocinfo" output will not
3556 	 * be address sorted, because the purge list is not
3557 	 * sorted.
3558 	 */
3559 	if (list_is_last(&va->list, &vmap_area_list))
3560 		show_purge_info(m);
3561 
3562 	return 0;
3563 }
3564 
3565 static const struct seq_operations vmalloc_op = {
3566 	.start = s_start,
3567 	.next = s_next,
3568 	.stop = s_stop,
3569 	.show = s_show,
3570 };
3571 
3572 static int __init proc_vmalloc_init(void)
3573 {
3574 	if (IS_ENABLED(CONFIG_NUMA))
3575 		proc_create_seq_private("vmallocinfo", 0400, NULL,
3576 				&vmalloc_op,
3577 				nr_node_ids * sizeof(unsigned int), NULL);
3578 	else
3579 		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3580 	return 0;
3581 }
3582 module_init(proc_vmalloc_init);
3583 
3584 #endif
3585