xref: /openbmc/linux/mm/vmalloc.c (revision 88dca4ca)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/vmalloc.c
4  *
5  *  Copyright (C) 1993  Linus Torvalds
6  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
7  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
8  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
9  *  Numa awareness, Christoph Lameter, SGI, June 2005
10  */
11 
12 #include <linux/vmalloc.h>
13 #include <linux/mm.h>
14 #include <linux/module.h>
15 #include <linux/highmem.h>
16 #include <linux/sched/signal.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/interrupt.h>
20 #include <linux/proc_fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/set_memory.h>
23 #include <linux/debugobjects.h>
24 #include <linux/kallsyms.h>
25 #include <linux/list.h>
26 #include <linux/notifier.h>
27 #include <linux/rbtree.h>
28 #include <linux/radix-tree.h>
29 #include <linux/rcupdate.h>
30 #include <linux/pfn.h>
31 #include <linux/kmemleak.h>
32 #include <linux/atomic.h>
33 #include <linux/compiler.h>
34 #include <linux/llist.h>
35 #include <linux/bitops.h>
36 #include <linux/rbtree_augmented.h>
37 #include <linux/overflow.h>
38 
39 #include <linux/uaccess.h>
40 #include <asm/tlbflush.h>
41 #include <asm/shmparam.h>
42 
43 #include "internal.h"
44 
45 bool is_vmalloc_addr(const void *x)
46 {
47 	unsigned long addr = (unsigned long)x;
48 
49 	return addr >= VMALLOC_START && addr < VMALLOC_END;
50 }
51 EXPORT_SYMBOL(is_vmalloc_addr);
52 
53 struct vfree_deferred {
54 	struct llist_head list;
55 	struct work_struct wq;
56 };
57 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
58 
59 static void __vunmap(const void *, int);
60 
61 static void free_work(struct work_struct *w)
62 {
63 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
64 	struct llist_node *t, *llnode;
65 
66 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
67 		__vunmap((void *)llnode, 1);
68 }
69 
70 /*** Page table manipulation functions ***/
71 
72 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
73 {
74 	pte_t *pte;
75 
76 	pte = pte_offset_kernel(pmd, addr);
77 	do {
78 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
79 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80 	} while (pte++, addr += PAGE_SIZE, addr != end);
81 }
82 
83 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
84 {
85 	pmd_t *pmd;
86 	unsigned long next;
87 
88 	pmd = pmd_offset(pud, addr);
89 	do {
90 		next = pmd_addr_end(addr, end);
91 		if (pmd_clear_huge(pmd))
92 			continue;
93 		if (pmd_none_or_clear_bad(pmd))
94 			continue;
95 		vunmap_pte_range(pmd, addr, next);
96 	} while (pmd++, addr = next, addr != end);
97 }
98 
99 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
100 {
101 	pud_t *pud;
102 	unsigned long next;
103 
104 	pud = pud_offset(p4d, addr);
105 	do {
106 		next = pud_addr_end(addr, end);
107 		if (pud_clear_huge(pud))
108 			continue;
109 		if (pud_none_or_clear_bad(pud))
110 			continue;
111 		vunmap_pmd_range(pud, addr, next);
112 	} while (pud++, addr = next, addr != end);
113 }
114 
115 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
116 {
117 	p4d_t *p4d;
118 	unsigned long next;
119 
120 	p4d = p4d_offset(pgd, addr);
121 	do {
122 		next = p4d_addr_end(addr, end);
123 		if (p4d_clear_huge(p4d))
124 			continue;
125 		if (p4d_none_or_clear_bad(p4d))
126 			continue;
127 		vunmap_pud_range(p4d, addr, next);
128 	} while (p4d++, addr = next, addr != end);
129 }
130 
131 /**
132  * unmap_kernel_range_noflush - unmap kernel VM area
133  * @addr: start of the VM area to unmap
134  * @size: size of the VM area to unmap
135  *
136  * Unmap PFN_UP(@size) pages at @addr.  The VM area that @addr and @size
137  * specify should have been allocated using get_vm_area() and its friends.
138  *
139  * NOTE:
140  * This function does NOT do any cache flushing.  The caller is responsible
141  * for calling flush_cache_vunmap() on to-be-unmapped areas before calling this
142  * function and flush_tlb_kernel_range() after.
143  */
144 void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
145 {
146 	unsigned long end = addr + size;
147 	unsigned long next;
148 	pgd_t *pgd;
149 
150 	BUG_ON(addr >= end);
151 	pgd = pgd_offset_k(addr);
152 	do {
153 		next = pgd_addr_end(addr, end);
154 		if (pgd_none_or_clear_bad(pgd))
155 			continue;
156 		vunmap_p4d_range(pgd, addr, next);
157 	} while (pgd++, addr = next, addr != end);
158 }
159 
160 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
161 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
162 {
163 	pte_t *pte;
164 
165 	/*
166 	 * nr is a running index into the array which helps higher level
167 	 * callers keep track of where we're up to.
168 	 */
169 
170 	pte = pte_alloc_kernel(pmd, addr);
171 	if (!pte)
172 		return -ENOMEM;
173 	do {
174 		struct page *page = pages[*nr];
175 
176 		if (WARN_ON(!pte_none(*pte)))
177 			return -EBUSY;
178 		if (WARN_ON(!page))
179 			return -ENOMEM;
180 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
181 		(*nr)++;
182 	} while (pte++, addr += PAGE_SIZE, addr != end);
183 	return 0;
184 }
185 
186 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
187 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
188 {
189 	pmd_t *pmd;
190 	unsigned long next;
191 
192 	pmd = pmd_alloc(&init_mm, pud, addr);
193 	if (!pmd)
194 		return -ENOMEM;
195 	do {
196 		next = pmd_addr_end(addr, end);
197 		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
198 			return -ENOMEM;
199 	} while (pmd++, addr = next, addr != end);
200 	return 0;
201 }
202 
203 static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
204 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
205 {
206 	pud_t *pud;
207 	unsigned long next;
208 
209 	pud = pud_alloc(&init_mm, p4d, addr);
210 	if (!pud)
211 		return -ENOMEM;
212 	do {
213 		next = pud_addr_end(addr, end);
214 		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
215 			return -ENOMEM;
216 	} while (pud++, addr = next, addr != end);
217 	return 0;
218 }
219 
220 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
221 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
222 {
223 	p4d_t *p4d;
224 	unsigned long next;
225 
226 	p4d = p4d_alloc(&init_mm, pgd, addr);
227 	if (!p4d)
228 		return -ENOMEM;
229 	do {
230 		next = p4d_addr_end(addr, end);
231 		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
232 			return -ENOMEM;
233 	} while (p4d++, addr = next, addr != end);
234 	return 0;
235 }
236 
237 /**
238  * map_kernel_range_noflush - map kernel VM area with the specified pages
239  * @addr: start of the VM area to map
240  * @size: size of the VM area to map
241  * @prot: page protection flags to use
242  * @pages: pages to map
243  *
244  * Map PFN_UP(@size) pages at @addr.  The VM area that @addr and @size specify
245  * should have been allocated using get_vm_area() and its friends.
246  *
247  * NOTE:
248  * This function does NOT do any cache flushing.  The caller is responsible for
249  * calling flush_cache_vmap() on to-be-mapped areas before calling this
250  * function.
251  *
252  * RETURNS:
253  * 0 on success, -errno on failure.
254  */
255 int map_kernel_range_noflush(unsigned long addr, unsigned long size,
256 			     pgprot_t prot, struct page **pages)
257 {
258 	unsigned long end = addr + size;
259 	unsigned long next;
260 	pgd_t *pgd;
261 	int err = 0;
262 	int nr = 0;
263 
264 	BUG_ON(addr >= end);
265 	pgd = pgd_offset_k(addr);
266 	do {
267 		next = pgd_addr_end(addr, end);
268 		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
269 		if (err)
270 			return err;
271 	} while (pgd++, addr = next, addr != end);
272 
273 	return 0;
274 }
275 
276 int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
277 		struct page **pages)
278 {
279 	int ret;
280 
281 	ret = map_kernel_range_noflush(start, size, prot, pages);
282 	flush_cache_vmap(start, start + size);
283 	return ret;
284 }
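/*
 * Illustrative sketch of how the *_noflush primitives above are paired with
 * the required flushes, mirroring what map_kernel_range() and
 * free_unmap_vmap_area() in this file do:
 *
 *	map:	map_kernel_range_noflush(addr, size, prot, pages);
 *		flush_cache_vmap(addr, addr + size);
 *
 *	unmap:	flush_cache_vunmap(addr, addr + size);
 *		unmap_kernel_range_noflush(addr, size);
 *		flush_tlb_kernel_range(addr, addr + size);
 *		(the TLB flush may be deferred to the lazy purge path)
 */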
285 
286 int is_vmalloc_or_module_addr(const void *x)
287 {
288 	/*
289 	 * ARM, x86-64 and sparc64 put modules in a special place,
290 	 * and fall back on vmalloc() if that fails. Others
291 	 * just put them in the vmalloc space.
292 	 */
293 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
294 	unsigned long addr = (unsigned long)x;
295 	if (addr >= MODULES_VADDR && addr < MODULES_END)
296 		return 1;
297 #endif
298 	return is_vmalloc_addr(x);
299 }
300 
301 /*
302  * Walk a vmap address to the struct page it maps.
303  */
304 struct page *vmalloc_to_page(const void *vmalloc_addr)
305 {
306 	unsigned long addr = (unsigned long) vmalloc_addr;
307 	struct page *page = NULL;
308 	pgd_t *pgd = pgd_offset_k(addr);
309 	p4d_t *p4d;
310 	pud_t *pud;
311 	pmd_t *pmd;
312 	pte_t *ptep, pte;
313 
314 	/*
315 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
316 	 * architectures that do not vmalloc module space
317 	 */
318 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
319 
320 	if (pgd_none(*pgd))
321 		return NULL;
322 	p4d = p4d_offset(pgd, addr);
323 	if (p4d_none(*p4d))
324 		return NULL;
325 	pud = pud_offset(p4d, addr);
326 
327 	/*
328 	 * Don't dereference bad PUD or PMD (below) entries. This will also
329 	 * identify huge mappings, which we may encounter on architectures
330 	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
331 	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
332 	 * not [unambiguously] associated with a struct page, so there is
333 	 * no correct value to return for them.
334 	 */
335 	WARN_ON_ONCE(pud_bad(*pud));
336 	if (pud_none(*pud) || pud_bad(*pud))
337 		return NULL;
338 	pmd = pmd_offset(pud, addr);
339 	WARN_ON_ONCE(pmd_bad(*pmd));
340 	if (pmd_none(*pmd) || pmd_bad(*pmd))
341 		return NULL;
342 
343 	ptep = pte_offset_map(pmd, addr);
344 	pte = *ptep;
345 	if (pte_present(pte))
346 		page = pte_page(pte);
347 	pte_unmap(ptep);
348 	return page;
349 }
350 EXPORT_SYMBOL(vmalloc_to_page);
351 
352 /*
353  * Map a vmalloc()-space virtual address to the physical page frame number.
354  */
355 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
356 {
357 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
358 }
359 EXPORT_SYMBOL(vmalloc_to_pfn);
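/*
 * Illustrative example (hypothetical buffer): walking the physical pages
 * that back a vmalloc'ed region with the helpers above. Consecutive
 * virtual pages generally map to non-contiguous page frames.
 *
 *	void *buf = vmalloc(4 * PAGE_SIZE);
 *	struct page *page = vmalloc_to_page(buf + i * PAGE_SIZE);
 *	unsigned long pfn = vmalloc_to_pfn(buf + i * PAGE_SIZE);
 */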
360 
361 
362 /*** Global kva allocator ***/
363 
364 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
365 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
366 
367 
368 static DEFINE_SPINLOCK(vmap_area_lock);
369 static DEFINE_SPINLOCK(free_vmap_area_lock);
370 /* Export for kexec only */
371 LIST_HEAD(vmap_area_list);
372 static LLIST_HEAD(vmap_purge_list);
373 static struct rb_root vmap_area_root = RB_ROOT;
374 static bool vmap_initialized __read_mostly;
375 
376 /*
377  * This kmem_cache is used for vmap_area objects. Instead of
378  * allocating from slab we reuse an object from this cache to
379  * make things faster. Especially in "no edge" splitting of
380  * free block.
381  */
382 static struct kmem_cache *vmap_area_cachep;
383 
384 /*
385  * This linked list is used in pair with free_vmap_area_root.
386  * It gives O(1) access to prev/next to perform fast coalescing.
387  */
388 static LIST_HEAD(free_vmap_area_list);
389 
390 /*
391  * This augmented red-black tree represents the free vmap space.
392  * All vmap_area objects in this tree are sorted by va->va_start
393  * address. It is used for allocation and merging when a vmap
394  * object is released.
395  *
396  * Each vmap_area node contains the maximum available free block
397  * size of its sub-tree, right or left. Therefore it is possible to
398  * find the lowest match of a free area.
399  */
400 static struct rb_root free_vmap_area_root = RB_ROOT;
401 
402 /*
403  * Preload a CPU with one object for "no edge" split case. The
404  * aim is to get rid of allocations from the atomic context, thus
405  * to use more permissive allocation masks.
406  */
407 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
408 
409 static __always_inline unsigned long
410 va_size(struct vmap_area *va)
411 {
412 	return (va->va_end - va->va_start);
413 }
414 
415 static __always_inline unsigned long
416 get_subtree_max_size(struct rb_node *node)
417 {
418 	struct vmap_area *va;
419 
420 	va = rb_entry_safe(node, struct vmap_area, rb_node);
421 	return va ? va->subtree_max_size : 0;
422 }
423 
424 /*
425  * Gets called when a node is removed from the tree or when it is rotated.
426  */
427 static __always_inline unsigned long
428 compute_subtree_max_size(struct vmap_area *va)
429 {
430 	return max3(va_size(va),
431 		get_subtree_max_size(va->rb_node.rb_left),
432 		get_subtree_max_size(va->rb_node.rb_right));
433 }
434 
435 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
436 	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
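/*
 * Note: RB_DECLARE_CALLBACKS_MAX() above generates the augmented rbtree
 * callbacks (free_vmap_area_rb_augment_cb) that keep va->subtree_max_size
 * equal to the largest va_size() in the subtree rooted at that node,
 * which is what makes the "lowest match" search below possible.
 */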
437 
438 static void purge_vmap_area_lazy(void);
439 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
440 static unsigned long lazy_max_pages(void);
441 
442 static atomic_long_t nr_vmalloc_pages;
443 
444 unsigned long vmalloc_nr_pages(void)
445 {
446 	return atomic_long_read(&nr_vmalloc_pages);
447 }
448 
449 static struct vmap_area *__find_vmap_area(unsigned long addr)
450 {
451 	struct rb_node *n = vmap_area_root.rb_node;
452 
453 	while (n) {
454 		struct vmap_area *va;
455 
456 		va = rb_entry(n, struct vmap_area, rb_node);
457 		if (addr < va->va_start)
458 			n = n->rb_left;
459 		else if (addr >= va->va_end)
460 			n = n->rb_right;
461 		else
462 			return va;
463 	}
464 
465 	return NULL;
466 }
467 
468 /*
469  * This function returns the parent node's left or right link, under which
470  * the new node will be attached, and stores the parent itself in @parent.
471  */
472 static __always_inline struct rb_node **
473 find_va_links(struct vmap_area *va,
474 	struct rb_root *root, struct rb_node *from,
475 	struct rb_node **parent)
476 {
477 	struct vmap_area *tmp_va;
478 	struct rb_node **link;
479 
480 	if (root) {
481 		link = &root->rb_node;
482 		if (unlikely(!*link)) {
483 			*parent = NULL;
484 			return link;
485 		}
486 	} else {
487 		link = &from;
488 	}
489 
490 	/*
491 	 * Go to the bottom of the tree. When we hit the last point
492 	 * we end up with the parent rb_node and the correct direction, here
493 	 * called "link", where the new va->rb_node will be attached.
494 	 */
495 	do {
496 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
497 
498 		/*
499 		 * During the traversal we also do some sanity checks.
500 		 * Trigger the BUG() if there is a partial (left/right)
501 		 * or full overlap.
502 		 */
503 		if (va->va_start < tmp_va->va_end &&
504 				va->va_end <= tmp_va->va_start)
505 			link = &(*link)->rb_left;
506 		else if (va->va_end > tmp_va->va_start &&
507 				va->va_start >= tmp_va->va_end)
508 			link = &(*link)->rb_right;
509 		else
510 			BUG();
511 	} while (*link);
512 
513 	*parent = &tmp_va->rb_node;
514 	return link;
515 }
516 
517 static __always_inline struct list_head *
518 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
519 {
520 	struct list_head *list;
521 
522 	if (unlikely(!parent))
523 		/*
524 		 * The red-black tree where we try to find VA neighbors
525 		 * before merging or inserting is empty, i.e. there is
526 		 * no free vmap space. Normally it does not happen but
527 		 * we handle this case anyway.
528 		 */
529 		return NULL;
530 
531 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
532 	return (&parent->rb_right == link ? list->next : list);
533 }
534 
535 static __always_inline void
536 link_va(struct vmap_area *va, struct rb_root *root,
537 	struct rb_node *parent, struct rb_node **link, struct list_head *head)
538 {
539 	/*
540 	 * VA is still not in the list, but we can
541 	 * identify its future previous list_head node.
542 	 */
543 	if (likely(parent)) {
544 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
545 		if (&parent->rb_right != link)
546 			head = head->prev;
547 	}
548 
549 	/* Insert to the rb-tree */
550 	rb_link_node(&va->rb_node, parent, link);
551 	if (root == &free_vmap_area_root) {
552 		/*
553 		 * Some explanation here. Just perform a simple insertion
554 		 * into the tree. We do not set va->subtree_max_size to
555 		 * its current size before calling rb_insert_augmented().
556 		 * This is because we populate the tree from the bottom
557 		 * up to the parent levels only when the node _is_ in the tree.
558 		 *
559 		 * Therefore we set subtree_max_size to zero after insertion,
560 		 * to let __augment_tree_propagate_from() put everything in
561 		 * the correct order later on.
562 		 */
563 		rb_insert_augmented(&va->rb_node,
564 			root, &free_vmap_area_rb_augment_cb);
565 		va->subtree_max_size = 0;
566 	} else {
567 		rb_insert_color(&va->rb_node, root);
568 	}
569 
570 	/* Address-sort this list */
571 	list_add(&va->list, head);
572 }
573 
574 static __always_inline void
575 unlink_va(struct vmap_area *va, struct rb_root *root)
576 {
577 	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
578 		return;
579 
580 	if (root == &free_vmap_area_root)
581 		rb_erase_augmented(&va->rb_node,
582 			root, &free_vmap_area_rb_augment_cb);
583 	else
584 		rb_erase(&va->rb_node, root);
585 
586 	list_del(&va->list);
587 	RB_CLEAR_NODE(&va->rb_node);
588 }
589 
590 #if DEBUG_AUGMENT_PROPAGATE_CHECK
591 static void
592 augment_tree_propagate_check(struct rb_node *n)
593 {
594 	struct vmap_area *va;
595 	struct rb_node *node;
596 	unsigned long size;
597 	bool found = false;
598 
599 	if (n == NULL)
600 		return;
601 
602 	va = rb_entry(n, struct vmap_area, rb_node);
603 	size = va->subtree_max_size;
604 	node = n;
605 
606 	while (node) {
607 		va = rb_entry(node, struct vmap_area, rb_node);
608 
609 		if (get_subtree_max_size(node->rb_left) == size) {
610 			node = node->rb_left;
611 		} else {
612 			if (va_size(va) == size) {
613 				found = true;
614 				break;
615 			}
616 
617 			node = node->rb_right;
618 		}
619 	}
620 
621 	if (!found) {
622 		va = rb_entry(n, struct vmap_area, rb_node);
623 		pr_emerg("tree is corrupted: %lu, %lu\n",
624 			va_size(va), va->subtree_max_size);
625 	}
626 
627 	augment_tree_propagate_check(n->rb_left);
628 	augment_tree_propagate_check(n->rb_right);
629 }
630 #endif
631 
632 /*
633  * This function populates subtree_max_size from bottom to upper
634  * levels starting from VA point. The propagation must be done
635  * when the VA size is modified by changing its va_start/va_end, or
636  * when a new VA is inserted into the tree.
637  *
638  * It means that __augment_tree_propagate_from() must be called:
639  * - After VA has been inserted to the tree(free path);
640  * - After VA has been shrunk(allocation path);
641  * - After VA has been increased(merging path).
642  *
643  * Please note that this does not mean that upper parent nodes
644  * and their subtree_max_size are recalculated all the time up
645  * to the root node.
646  *
647  *       4--8
648  *        /\
649  *       /  \
650  *      /    \
651  *    2--2  8--8
652  *
653  * For example, if we modify node 4, shrinking it to 2, then no
654  * modification is required at all. If we shrink node 2 to 1, only its
655  * subtree_max_size is updated and set to 1. If we shrink node 8 to 6,
656  * then its subtree_max_size is set to 6 and the parent node becomes
657  * 4--6.
658  */
659 static __always_inline void
660 augment_tree_propagate_from(struct vmap_area *va)
661 {
662 	struct rb_node *node = &va->rb_node;
663 	unsigned long new_va_sub_max_size;
664 
665 	while (node) {
666 		va = rb_entry(node, struct vmap_area, rb_node);
667 		new_va_sub_max_size = compute_subtree_max_size(va);
668 
669 		/*
670 		 * If the newly calculated maximum available size of the
671 		 * subtree is equal to the current one, then it means that
672 		 * the tree is propagated correctly. So we have to stop at
673 		 * this point to save cycles.
674 		 */
675 		if (va->subtree_max_size == new_va_sub_max_size)
676 			break;
677 
678 		va->subtree_max_size = new_va_sub_max_size;
679 		node = rb_parent(&va->rb_node);
680 	}
681 
682 #if DEBUG_AUGMENT_PROPAGATE_CHECK
683 	augment_tree_propagate_check(free_vmap_area_root.rb_node);
684 #endif
685 }
686 
687 static void
688 insert_vmap_area(struct vmap_area *va,
689 	struct rb_root *root, struct list_head *head)
690 {
691 	struct rb_node **link;
692 	struct rb_node *parent;
693 
694 	link = find_va_links(va, root, NULL, &parent);
695 	link_va(va, root, parent, link, head);
696 }
697 
698 static void
699 insert_vmap_area_augment(struct vmap_area *va,
700 	struct rb_node *from, struct rb_root *root,
701 	struct list_head *head)
702 {
703 	struct rb_node **link;
704 	struct rb_node *parent;
705 
706 	if (from)
707 		link = find_va_links(va, NULL, from, &parent);
708 	else
709 		link = find_va_links(va, root, NULL, &parent);
710 
711 	link_va(va, root, parent, link, head);
712 	augment_tree_propagate_from(va);
713 }
714 
715 /*
716  * Merge a de-allocated chunk of VA memory with the previous
717  * and next free blocks. If coalescing is not possible, a new
718  * free area is inserted. If the VA has been merged, it is
719  * freed.
720  */
721 static __always_inline struct vmap_area *
722 merge_or_add_vmap_area(struct vmap_area *va,
723 	struct rb_root *root, struct list_head *head)
724 {
725 	struct vmap_area *sibling;
726 	struct list_head *next;
727 	struct rb_node **link;
728 	struct rb_node *parent;
729 	bool merged = false;
730 
731 	/*
732 	 * Find a place in the tree where VA potentially will be
733 	 * inserted, unless it is merged with its sibling/siblings.
734 	 */
735 	link = find_va_links(va, root, NULL, &parent);
736 
737 	/*
738 	 * Get next node of VA to check if merging can be done.
739 	 */
740 	next = get_va_next_sibling(parent, link);
741 	if (unlikely(next == NULL))
742 		goto insert;
743 
744 	/*
745 	 * start            end
746 	 * |                |
747 	 * |<------VA------>|<-----Next----->|
748 	 *                  |                |
749 	 *                  start            end
750 	 */
751 	if (next != head) {
752 		sibling = list_entry(next, struct vmap_area, list);
753 		if (sibling->va_start == va->va_end) {
754 			sibling->va_start = va->va_start;
755 
756 			/* Check and update the tree if needed. */
757 			augment_tree_propagate_from(sibling);
758 
759 			/* Free vmap_area object. */
760 			kmem_cache_free(vmap_area_cachep, va);
761 
762 			/* Point to the new merged area. */
763 			va = sibling;
764 			merged = true;
765 		}
766 	}
767 
768 	/*
769 	 * start            end
770 	 * |                |
771 	 * |<-----Prev----->|<------VA------>|
772 	 *                  |                |
773 	 *                  start            end
774 	 */
775 	if (next->prev != head) {
776 		sibling = list_entry(next->prev, struct vmap_area, list);
777 		if (sibling->va_end == va->va_start) {
778 			sibling->va_end = va->va_end;
779 
780 			/* Check and update the tree if needed. */
781 			augment_tree_propagate_from(sibling);
782 
783 			if (merged)
784 				unlink_va(va, root);
785 
786 			/* Free vmap_area object. */
787 			kmem_cache_free(vmap_area_cachep, va);
788 
789 			/* Point to the new merged area. */
790 			va = sibling;
791 			merged = true;
792 		}
793 	}
794 
795 insert:
796 	if (!merged) {
797 		link_va(va, root, parent, link, head);
798 		augment_tree_propagate_from(va);
799 	}
800 
801 	return va;
802 }
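/*
 * Worked example: if the free tree holds [0xA000, 0xB000) and
 * [0xC000, 0xD000) and the area [0xB000, 0xC000) is freed, the next
 * sibling is first extended to [0xB000, 0xD000) and then the previous
 * sibling to [0xA000, 0xD000); the redundant vmap_area objects are
 * returned to vmap_area_cachep, so all three ranges coalesce into one.
 */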
803 
804 static __always_inline bool
805 is_within_this_va(struct vmap_area *va, unsigned long size,
806 	unsigned long align, unsigned long vstart)
807 {
808 	unsigned long nva_start_addr;
809 
810 	if (va->va_start > vstart)
811 		nva_start_addr = ALIGN(va->va_start, align);
812 	else
813 		nva_start_addr = ALIGN(vstart, align);
814 
815 	/* Can be overflowed due to big size or alignment. */
816 	if (nva_start_addr + size < nva_start_addr ||
817 			nva_start_addr < vstart)
818 		return false;
819 
820 	return (nva_start_addr + size <= va->va_end);
821 }
822 
823 /*
824  * Find the first free block (lowest start address) in the tree
825  * that can satisfy the request described by the passed
826  * parameters.
827  */
828 static __always_inline struct vmap_area *
829 find_vmap_lowest_match(unsigned long size,
830 	unsigned long align, unsigned long vstart)
831 {
832 	struct vmap_area *va;
833 	struct rb_node *node;
834 	unsigned long length;
835 
836 	/* Start from the root. */
837 	node = free_vmap_area_root.rb_node;
838 
839 	/* Adjust the search size for alignment overhead. */
840 	length = size + align - 1;
841 
842 	while (node) {
843 		va = rb_entry(node, struct vmap_area, rb_node);
844 
845 		if (get_subtree_max_size(node->rb_left) >= length &&
846 				vstart < va->va_start) {
847 			node = node->rb_left;
848 		} else {
849 			if (is_within_this_va(va, size, align, vstart))
850 				return va;
851 
852 			/*
853 			 * It does not make sense to go deeper into the right
854 			 * sub-tree if it does not have a free block that is
855 			 * equal to or bigger than the requested search length.
856 			 */
857 			if (get_subtree_max_size(node->rb_right) >= length) {
858 				node = node->rb_right;
859 				continue;
860 			}
861 
862 			/*
863 			 * OK. We roll back and find the first right sub-tree
864 			 * that can satisfy the search criteria. It can happen
865 			 * only once due to the "vstart" restriction.
866 			 */
867 			while ((node = rb_parent(node))) {
868 				va = rb_entry(node, struct vmap_area, rb_node);
869 				if (is_within_this_va(va, size, align, vstart))
870 					return va;
871 
872 				if (get_subtree_max_size(node->rb_right) >= length &&
873 						vstart <= va->va_start) {
874 					node = node->rb_right;
875 					break;
876 				}
877 			}
878 		}
879 	}
880 
881 	return NULL;
882 }
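/*
 * Note on the search above: subtree_max_size acts as a pruning hint.
 * Any subtree whose subtree_max_size is smaller than the adjusted
 * length (size + align - 1) cannot contain a fitting free area and is
 * skipped entirely; the walk prefers the left subtree so the returned
 * area has the lowest possible start address above vstart.
 */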
883 
884 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
885 #include <linux/random.h>
886 
887 static struct vmap_area *
888 find_vmap_lowest_linear_match(unsigned long size,
889 	unsigned long align, unsigned long vstart)
890 {
891 	struct vmap_area *va;
892 
893 	list_for_each_entry(va, &free_vmap_area_list, list) {
894 		if (!is_within_this_va(va, size, align, vstart))
895 			continue;
896 
897 		return va;
898 	}
899 
900 	return NULL;
901 }
902 
903 static void
904 find_vmap_lowest_match_check(unsigned long size)
905 {
906 	struct vmap_area *va_1, *va_2;
907 	unsigned long vstart;
908 	unsigned int rnd;
909 
910 	get_random_bytes(&rnd, sizeof(rnd));
911 	vstart = VMALLOC_START + rnd;
912 
913 	va_1 = find_vmap_lowest_match(size, 1, vstart);
914 	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
915 
916 	if (va_1 != va_2)
917 		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
918 			va_1, va_2, vstart);
919 }
920 #endif
921 
922 enum fit_type {
923 	NOTHING_FIT = 0,
924 	FL_FIT_TYPE = 1,	/* full fit */
925 	LE_FIT_TYPE = 2,	/* left edge fit */
926 	RE_FIT_TYPE = 3,	/* right edge fit */
927 	NE_FIT_TYPE = 4		/* no edge fit */
928 };
929 
930 static __always_inline enum fit_type
931 classify_va_fit_type(struct vmap_area *va,
932 	unsigned long nva_start_addr, unsigned long size)
933 {
934 	enum fit_type type;
935 
936 	/* Check if it is within VA. */
937 	if (nva_start_addr < va->va_start ||
938 			nva_start_addr + size > va->va_end)
939 		return NOTHING_FIT;
940 
941 	/* Now classify. */
942 	if (va->va_start == nva_start_addr) {
943 		if (va->va_end == nva_start_addr + size)
944 			type = FL_FIT_TYPE;
945 		else
946 			type = LE_FIT_TYPE;
947 	} else if (va->va_end == nva_start_addr + size) {
948 		type = RE_FIT_TYPE;
949 	} else {
950 		type = NE_FIT_TYPE;
951 	}
952 
953 	return type;
954 }
955 
956 static __always_inline int
957 adjust_va_to_fit_type(struct vmap_area *va,
958 	unsigned long nva_start_addr, unsigned long size,
959 	enum fit_type type)
960 {
961 	struct vmap_area *lva = NULL;
962 
963 	if (type == FL_FIT_TYPE) {
964 		/*
965 		 * No need to split VA, it fully fits.
966 		 *
967 		 * |               |
968 		 * V      NVA      V
969 		 * |---------------|
970 		 */
971 		unlink_va(va, &free_vmap_area_root);
972 		kmem_cache_free(vmap_area_cachep, va);
973 	} else if (type == LE_FIT_TYPE) {
974 		/*
975 		 * Split left edge of fit VA.
976 		 *
977 		 * |       |
978 		 * V  NVA  V   R
979 		 * |-------|-------|
980 		 */
981 		va->va_start += size;
982 	} else if (type == RE_FIT_TYPE) {
983 		/*
984 		 * Split right edge of fit VA.
985 		 *
986 		 *         |       |
987 		 *     L   V  NVA  V
988 		 * |-------|-------|
989 		 */
990 		va->va_end = nva_start_addr;
991 	} else if (type == NE_FIT_TYPE) {
992 		/*
993 		 * Split no edge of fit VA.
994 		 *
995 		 *     |       |
996 		 *   L V  NVA  V R
997 		 * |---|-------|---|
998 		 */
999 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1000 		if (unlikely(!lva)) {
1001 			/*
1002 			 * For the percpu allocator we do not do any pre-allocation
1003 			 * and leave things as they are. The reason is that it most
1004 			 * likely never ends up with NE_FIT_TYPE splitting. In the
1005 			 * case of percpu allocations, offsets and sizes are aligned
1006 			 * to a fixed alignment request, i.e. RE_FIT_TYPE and
1007 			 * FL_FIT_TYPE are its main fitting cases.
1008 			 *
1009 			 * There are a few exceptions though; for example, the first
1010 			 * allocation (early boot up) when we have "one" big free
1011 			 * space that has to be split.
1012 			 *
1013 			 * We can also hit this path in the case of regular "vmap"
1014 			 * allocations, if "this" current CPU was not preloaded.
1015 			 * See the comment in alloc_vmap_area() for why. If so,
1016 			 * then GFP_NOWAIT is used instead to get an extra object
1017 			 * for the split. That is rare and most of the time does
1018 			 * not occur.
1019 			 *
1020 			 * What happens if the allocation fails? Basically, the
1021 			 * "overflow" path is triggered to purge lazily freed areas
1022 			 * in order to free some memory, and then the "retry" path
1023 			 * is triggered to repeat the attempt one more time. See
1024 			 * alloc_vmap_area() for more details.
1025 			 */
1026 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1027 			if (!lva)
1028 				return -1;
1029 		}
1030 
1031 		/*
1032 		 * Build the remainder.
1033 		 */
1034 		lva->va_start = va->va_start;
1035 		lva->va_end = nva_start_addr;
1036 
1037 		/*
1038 		 * Shrink this VA to remaining size.
1039 		 */
1040 		va->va_start = nva_start_addr + size;
1041 	} else {
1042 		return -1;
1043 	}
1044 
1045 	if (type != FL_FIT_TYPE) {
1046 		augment_tree_propagate_from(va);
1047 
1048 		if (lva)	/* type == NE_FIT_TYPE */
1049 			insert_vmap_area_augment(lva, &va->rb_node,
1050 				&free_vmap_area_root, &free_vmap_area_list);
1051 	}
1052 
1053 	return 0;
1054 }
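/*
 * Worked example (NE_FIT case): with a free area va = [0x100000, 0x500000)
 * and a request of size 0x100000 placed at nva_start_addr = 0x200000,
 * neither edge matches, so "lva" becomes the left remainder
 * [0x100000, 0x200000) and "va" is shrunk to the right remainder
 * [0x300000, 0x500000); both are then propagated/inserted into the
 * free tree.
 */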
1055 
1056 /*
1057  * Returns the start address of the newly allocated area on success.
1058  * Otherwise "vend" is returned to indicate failure.
1059  */
1060 static __always_inline unsigned long
1061 __alloc_vmap_area(unsigned long size, unsigned long align,
1062 	unsigned long vstart, unsigned long vend)
1063 {
1064 	unsigned long nva_start_addr;
1065 	struct vmap_area *va;
1066 	enum fit_type type;
1067 	int ret;
1068 
1069 	va = find_vmap_lowest_match(size, align, vstart);
1070 	if (unlikely(!va))
1071 		return vend;
1072 
1073 	if (va->va_start > vstart)
1074 		nva_start_addr = ALIGN(va->va_start, align);
1075 	else
1076 		nva_start_addr = ALIGN(vstart, align);
1077 
1078 	/* Check the "vend" restriction. */
1079 	if (nva_start_addr + size > vend)
1080 		return vend;
1081 
1082 	/* Classify what we have found. */
1083 	type = classify_va_fit_type(va, nva_start_addr, size);
1084 	if (WARN_ON_ONCE(type == NOTHING_FIT))
1085 		return vend;
1086 
1087 	/* Update the free vmap_area. */
1088 	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1089 	if (ret)
1090 		return vend;
1091 
1092 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1093 	find_vmap_lowest_match_check(size);
1094 #endif
1095 
1096 	return nva_start_addr;
1097 }
1098 
1099 /*
1100  * Free a region of KVA allocated by alloc_vmap_area
1101  */
1102 static void free_vmap_area(struct vmap_area *va)
1103 {
1104 	/*
1105 	 * Remove from the busy tree/list.
1106 	 */
1107 	spin_lock(&vmap_area_lock);
1108 	unlink_va(va, &vmap_area_root);
1109 	spin_unlock(&vmap_area_lock);
1110 
1111 	/*
1112 	 * Insert/Merge it back to the free tree/list.
1113 	 */
1114 	spin_lock(&free_vmap_area_lock);
1115 	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
1116 	spin_unlock(&free_vmap_area_lock);
1117 }
1118 
1119 /*
1120  * Allocate a region of KVA of the specified size and alignment, within the
1121  * vstart and vend.
1122  */
1123 static struct vmap_area *alloc_vmap_area(unsigned long size,
1124 				unsigned long align,
1125 				unsigned long vstart, unsigned long vend,
1126 				int node, gfp_t gfp_mask)
1127 {
1128 	struct vmap_area *va, *pva;
1129 	unsigned long addr;
1130 	int purged = 0;
1131 	int ret;
1132 
1133 	BUG_ON(!size);
1134 	BUG_ON(offset_in_page(size));
1135 	BUG_ON(!is_power_of_2(align));
1136 
1137 	if (unlikely(!vmap_initialized))
1138 		return ERR_PTR(-EBUSY);
1139 
1140 	might_sleep();
1141 	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1142 
1143 	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1144 	if (unlikely(!va))
1145 		return ERR_PTR(-ENOMEM);
1146 
1147 	/*
1148 	 * Only scan the relevant parts containing pointers to other objects
1149 	 * to avoid false negatives.
1150 	 */
1151 	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1152 
1153 retry:
1154 	/*
1155 	 * Preload this CPU with one extra vmap_area object. It is used
1156 	 * when the fit type of the free area is NE_FIT_TYPE. Please note,
1157 	 * this does not guarantee that the allocation occurs on a CPU that
1158 	 * is preloaded; instead we minimize the cases when it is not.
1159 	 * That can happen because of CPU migration, since there is a
1160 	 * race window until the spinlock below is taken.
1161 	 *
1162 	 * The preload is done in non-atomic context, which allows us
1163 	 * to use more permissive allocation masks and to be more stable
1164 	 * under low-memory conditions and high memory pressure. In the
1165 	 * rare case when the CPU is not preloaded, GFP_NOWAIT is used.
1166 	 *
1167 	 * Set "pva" to NULL here, because of the "retry" path.
1168 	 */
1169 	pva = NULL;
1170 
1171 	if (!this_cpu_read(ne_fit_preload_node))
1172 		/*
1173 		 * Even if it fails we do not really care about that.
1174 		 * Just proceed as is. If needed, the "overflow" path
1175 		 * will refill the cache we allocate from.
1176 		 */
1177 		pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1178 
1179 	spin_lock(&free_vmap_area_lock);
1180 
1181 	if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
1182 		kmem_cache_free(vmap_area_cachep, pva);
1183 
1184 	/*
1185 	 * If an allocation fails, the "vend" address is
1186 	 * returned. Therefore trigger the overflow path.
1187 	 */
1188 	addr = __alloc_vmap_area(size, align, vstart, vend);
1189 	spin_unlock(&free_vmap_area_lock);
1190 
1191 	if (unlikely(addr == vend))
1192 		goto overflow;
1193 
1194 	va->va_start = addr;
1195 	va->va_end = addr + size;
1196 	va->vm = NULL;
1197 
1198 
1199 	spin_lock(&vmap_area_lock);
1200 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1201 	spin_unlock(&vmap_area_lock);
1202 
1203 	BUG_ON(!IS_ALIGNED(va->va_start, align));
1204 	BUG_ON(va->va_start < vstart);
1205 	BUG_ON(va->va_end > vend);
1206 
1207 	ret = kasan_populate_vmalloc(addr, size);
1208 	if (ret) {
1209 		free_vmap_area(va);
1210 		return ERR_PTR(ret);
1211 	}
1212 
1213 	return va;
1214 
1215 overflow:
1216 	if (!purged) {
1217 		purge_vmap_area_lazy();
1218 		purged = 1;
1219 		goto retry;
1220 	}
1221 
1222 	if (gfpflags_allow_blocking(gfp_mask)) {
1223 		unsigned long freed = 0;
1224 		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1225 		if (freed > 0) {
1226 			purged = 0;
1227 			goto retry;
1228 		}
1229 	}
1230 
1231 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1232 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1233 			size);
1234 
1235 	kmem_cache_free(vmap_area_cachep, va);
1236 	return ERR_PTR(-EBUSY);
1237 }
1238 
1239 int register_vmap_purge_notifier(struct notifier_block *nb)
1240 {
1241 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
1242 }
1243 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1244 
1245 int unregister_vmap_purge_notifier(struct notifier_block *nb)
1246 {
1247 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1248 }
1249 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
1250 
1251 /*
1252  * lazy_max_pages is the maximum amount of virtual address space we gather up
1253  * before attempting to purge with a TLB flush.
1254  *
1255  * There is a tradeoff here: a larger number will cover more kernel page tables
1256  * and take slightly longer to purge, but it will linearly reduce the number of
1257  * global TLB flushes that must be performed. It would seem natural to scale
1258  * this number up linearly with the number of CPUs (because vmapping activity
1259  * could also scale linearly with the number of CPUs), however it is likely
1260  * that in practice, workloads might be constrained in other ways that mean
1261  * vmap activity will not scale linearly with CPUs. Also, I want to be
1262  * conservative and not introduce a big latency on huge systems, so go with
1263  * a less aggressive log scale. It will still be an improvement over the old
1264  * code, and it will be simple to change the scale factor if we find that it
1265  * becomes a problem on bigger systems.
1266  */
1267 static unsigned long lazy_max_pages(void)
1268 {
1269 	unsigned int log;
1270 
1271 	log = fls(num_online_cpus());
1272 
1273 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1274 }
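/*
 * Worked example: with 16 online CPUs, fls(16) = 5, so with 4K pages the
 * threshold is 5 * (32MB / 4KB) = 40960 pages, i.e. 160MB of lazily
 * freed virtual address space is accumulated before a purge is attempted.
 */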
1275 
1276 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1277 
1278 /*
1279  * Serialize vmap purging.  There is no actual criticial section protected
1280  * by this look, but we want to avoid concurrent calls for performance
1281  * reasons and to make the pcpu_get_vm_areas more deterministic.
1282  */
1283 static DEFINE_MUTEX(vmap_purge_lock);
1284 
1285 /* for per-CPU blocks */
1286 static void purge_fragmented_blocks_allcpus(void);
1287 
1288 /*
1289  * Called before a call to iounmap() if the caller wants the
1290  * vm_area_structs to be freed immediately.
1291  */
1292 void set_iounmap_nonlazy(void)
1293 {
1294 	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1295 }
1296 
1297 /*
1298  * Purges all lazily-freed vmap areas.
1299  */
1300 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1301 {
1302 	unsigned long resched_threshold;
1303 	struct llist_node *valist;
1304 	struct vmap_area *va;
1305 	struct vmap_area *n_va;
1306 
1307 	lockdep_assert_held(&vmap_purge_lock);
1308 
1309 	valist = llist_del_all(&vmap_purge_list);
1310 	if (unlikely(valist == NULL))
1311 		return false;
1312 
1313 	/*
1314 	 * First make sure the mappings are removed from all page-tables
1315 	 * before they are freed.
1316 	 */
1317 	vmalloc_sync_unmappings();
1318 
1319 	/*
1320 	 * TODO: calculate the flush range without looping.
1321 	 * The list can contain up to lazy_max_pages() elements.
1322 	 */
1323 	llist_for_each_entry(va, valist, purge_list) {
1324 		if (va->va_start < start)
1325 			start = va->va_start;
1326 		if (va->va_end > end)
1327 			end = va->va_end;
1328 	}
1329 
1330 	flush_tlb_kernel_range(start, end);
1331 	resched_threshold = lazy_max_pages() << 1;
1332 
1333 	spin_lock(&free_vmap_area_lock);
1334 	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
1335 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1336 		unsigned long orig_start = va->va_start;
1337 		unsigned long orig_end = va->va_end;
1338 
1339 		/*
1340 		 * Finally insert or merge lazily-freed area. It is
1341 		 * detached and there is no need to "unlink" it from
1342 		 * anything.
1343 		 */
1344 		va = merge_or_add_vmap_area(va, &free_vmap_area_root,
1345 					    &free_vmap_area_list);
1346 
1347 		if (is_vmalloc_or_module_addr((void *)orig_start))
1348 			kasan_release_vmalloc(orig_start, orig_end,
1349 					      va->va_start, va->va_end);
1350 
1351 		atomic_long_sub(nr, &vmap_lazy_nr);
1352 
1353 		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1354 			cond_resched_lock(&free_vmap_area_lock);
1355 	}
1356 	spin_unlock(&free_vmap_area_lock);
1357 	return true;
1358 }
1359 
1360 /*
1361  * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1362  * is already purging.
1363  */
1364 static void try_purge_vmap_area_lazy(void)
1365 {
1366 	if (mutex_trylock(&vmap_purge_lock)) {
1367 		__purge_vmap_area_lazy(ULONG_MAX, 0);
1368 		mutex_unlock(&vmap_purge_lock);
1369 	}
1370 }
1371 
1372 /*
1373  * Kick off a purge of the outstanding lazy areas.
1374  */
1375 static void purge_vmap_area_lazy(void)
1376 {
1377 	mutex_lock(&vmap_purge_lock);
1378 	purge_fragmented_blocks_allcpus();
1379 	__purge_vmap_area_lazy(ULONG_MAX, 0);
1380 	mutex_unlock(&vmap_purge_lock);
1381 }
1382 
1383 /*
1384  * Free a vmap area, with the caller ensuring that the area has been
1385  * unmapped and flush_cache_vunmap() has been called for the correct
1386  * range previously.
1387  */
1388 static void free_vmap_area_noflush(struct vmap_area *va)
1389 {
1390 	unsigned long nr_lazy;
1391 
1392 	spin_lock(&vmap_area_lock);
1393 	unlink_va(va, &vmap_area_root);
1394 	spin_unlock(&vmap_area_lock);
1395 
1396 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1397 				PAGE_SHIFT, &vmap_lazy_nr);
1398 
1399 	/* After this point, we may free va at any time */
1400 	llist_add(&va->purge_list, &vmap_purge_list);
1401 
1402 	if (unlikely(nr_lazy > lazy_max_pages()))
1403 		try_purge_vmap_area_lazy();
1404 }
1405 
1406 /*
1407  * Free and unmap a vmap area
1408  */
1409 static void free_unmap_vmap_area(struct vmap_area *va)
1410 {
1411 	flush_cache_vunmap(va->va_start, va->va_end);
1412 	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
1413 	if (debug_pagealloc_enabled_static())
1414 		flush_tlb_kernel_range(va->va_start, va->va_end);
1415 
1416 	free_vmap_area_noflush(va);
1417 }
1418 
1419 static struct vmap_area *find_vmap_area(unsigned long addr)
1420 {
1421 	struct vmap_area *va;
1422 
1423 	spin_lock(&vmap_area_lock);
1424 	va = __find_vmap_area(addr);
1425 	spin_unlock(&vmap_area_lock);
1426 
1427 	return va;
1428 }
1429 
1430 /*** Per cpu kva allocator ***/
1431 
1432 /*
1433  * vmap space is limited, especially on 32-bit architectures. Ensure there is
1434  * room for at least 16 percpu vmap blocks per CPU.
1435  */
1436 /*
1437  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1438  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
1439  * instead (we just need a rough idea)
1440  */
1441 #if BITS_PER_LONG == 32
1442 #define VMALLOC_SPACE		(128UL*1024*1024)
1443 #else
1444 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
1445 #endif
1446 
1447 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1448 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1449 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1450 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1451 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1452 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1453 #define VMAP_BBMAP_BITS		\
1454 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1455 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1456 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1457 
1458 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
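/*
 * Worked example: on a 64-bit kernel with 4K pages and, say, NR_CPUS = 64,
 * VMALLOC_PAGES = 128G / 4K = 32M pages, so VMALLOC_PAGES / 64 / 16 = 32768;
 * after clamping, VMAP_BBMAP_BITS = 1024 and VMAP_BLOCK_SIZE = 4MB, matching
 * the "4MB with 4K pages" note above.
 */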
1459 
1460 struct vmap_block_queue {
1461 	spinlock_t lock;
1462 	struct list_head free;
1463 };
1464 
1465 struct vmap_block {
1466 	spinlock_t lock;
1467 	struct vmap_area *va;
1468 	unsigned long free, dirty;
1469 	unsigned long dirty_min, dirty_max; /*< dirty range */
1470 	struct list_head free_list;
1471 	struct rcu_head rcu_head;
1472 	struct list_head purge;
1473 };
1474 
1475 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1476 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1477 
1478 /*
1479  * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
1480  * in the free path. Could get rid of this if we change the API to return a
1481  * "cookie" from alloc, to be passed to free. But no big deal yet.
1482  */
1483 static DEFINE_SPINLOCK(vmap_block_tree_lock);
1484 static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
1485 
1486 /*
1487  * We should probably have a fallback mechanism to allocate virtual memory
1488  * out of partially filled vmap blocks. However vmap block sizing should be
1489  * fairly reasonable according to the vmalloc size, so it shouldn't be a
1490  * big problem.
1491  */
1492 
1493 static unsigned long addr_to_vb_idx(unsigned long addr)
1494 {
1495 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1496 	addr /= VMAP_BLOCK_SIZE;
1497 	return addr;
1498 }
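/*
 * Example: assuming VMALLOC_START is VMAP_BLOCK_SIZE aligned, an address
 * inside the first block maps to index 0, one inside the next block to
 * index 1, and so on; this index is the key used in the vmap_block_tree
 * radix tree above.
 */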
1499 
1500 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1501 {
1502 	unsigned long addr;
1503 
1504 	addr = va_start + (pages_off << PAGE_SHIFT);
1505 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1506 	return (void *)addr;
1507 }
1508 
1509 /**
1510  * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in it.
1511  *                  Of course the number of pages can't exceed VMAP_BBMAP_BITS
1512  * @order:    how many 2^order pages should be occupied in the newly allocated block
1513  * @gfp_mask: flags for the page level allocator
1514  *
1515  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1516  */
1517 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1518 {
1519 	struct vmap_block_queue *vbq;
1520 	struct vmap_block *vb;
1521 	struct vmap_area *va;
1522 	unsigned long vb_idx;
1523 	int node, err;
1524 	void *vaddr;
1525 
1526 	node = numa_node_id();
1527 
1528 	vb = kmalloc_node(sizeof(struct vmap_block),
1529 			gfp_mask & GFP_RECLAIM_MASK, node);
1530 	if (unlikely(!vb))
1531 		return ERR_PTR(-ENOMEM);
1532 
1533 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1534 					VMALLOC_START, VMALLOC_END,
1535 					node, gfp_mask);
1536 	if (IS_ERR(va)) {
1537 		kfree(vb);
1538 		return ERR_CAST(va);
1539 	}
1540 
1541 	err = radix_tree_preload(gfp_mask);
1542 	if (unlikely(err)) {
1543 		kfree(vb);
1544 		free_vmap_area(va);
1545 		return ERR_PTR(err);
1546 	}
1547 
1548 	vaddr = vmap_block_vaddr(va->va_start, 0);
1549 	spin_lock_init(&vb->lock);
1550 	vb->va = va;
1551 	/* At least something should be left free */
1552 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1553 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1554 	vb->dirty = 0;
1555 	vb->dirty_min = VMAP_BBMAP_BITS;
1556 	vb->dirty_max = 0;
1557 	INIT_LIST_HEAD(&vb->free_list);
1558 
1559 	vb_idx = addr_to_vb_idx(va->va_start);
1560 	spin_lock(&vmap_block_tree_lock);
1561 	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
1562 	spin_unlock(&vmap_block_tree_lock);
1563 	BUG_ON(err);
1564 	radix_tree_preload_end();
1565 
1566 	vbq = &get_cpu_var(vmap_block_queue);
1567 	spin_lock(&vbq->lock);
1568 	list_add_tail_rcu(&vb->free_list, &vbq->free);
1569 	spin_unlock(&vbq->lock);
1570 	put_cpu_var(vmap_block_queue);
1571 
1572 	return vaddr;
1573 }
1574 
1575 static void free_vmap_block(struct vmap_block *vb)
1576 {
1577 	struct vmap_block *tmp;
1578 	unsigned long vb_idx;
1579 
1580 	vb_idx = addr_to_vb_idx(vb->va->va_start);
1581 	spin_lock(&vmap_block_tree_lock);
1582 	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
1583 	spin_unlock(&vmap_block_tree_lock);
1584 	BUG_ON(tmp != vb);
1585 
1586 	free_vmap_area_noflush(vb->va);
1587 	kfree_rcu(vb, rcu_head);
1588 }
1589 
1590 static void purge_fragmented_blocks(int cpu)
1591 {
1592 	LIST_HEAD(purge);
1593 	struct vmap_block *vb;
1594 	struct vmap_block *n_vb;
1595 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1596 
1597 	rcu_read_lock();
1598 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1599 
1600 		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1601 			continue;
1602 
1603 		spin_lock(&vb->lock);
1604 		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1605 			vb->free = 0; /* prevent further allocs after releasing lock */
1606 			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1607 			vb->dirty_min = 0;
1608 			vb->dirty_max = VMAP_BBMAP_BITS;
1609 			spin_lock(&vbq->lock);
1610 			list_del_rcu(&vb->free_list);
1611 			spin_unlock(&vbq->lock);
1612 			spin_unlock(&vb->lock);
1613 			list_add_tail(&vb->purge, &purge);
1614 		} else
1615 			spin_unlock(&vb->lock);
1616 	}
1617 	rcu_read_unlock();
1618 
1619 	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1620 		list_del(&vb->purge);
1621 		free_vmap_block(vb);
1622 	}
1623 }
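/*
 * Note: the condition "free + dirty == VMAP_BBMAP_BITS && dirty !=
 * VMAP_BBMAP_BITS" above selects blocks in which every allocated chunk
 * has already been freed (all used space is dirty) but some free space
 * remains, i.e. fragmented blocks that can be reclaimed wholesale; fully
 * dirty blocks are skipped because vb_free() already releases those.
 */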
1624 
1625 static void purge_fragmented_blocks_allcpus(void)
1626 {
1627 	int cpu;
1628 
1629 	for_each_possible_cpu(cpu)
1630 		purge_fragmented_blocks(cpu);
1631 }
1632 
1633 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1634 {
1635 	struct vmap_block_queue *vbq;
1636 	struct vmap_block *vb;
1637 	void *vaddr = NULL;
1638 	unsigned int order;
1639 
1640 	BUG_ON(offset_in_page(size));
1641 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1642 	if (WARN_ON(size == 0)) {
1643 		/*
1644 		 * Allocating 0 bytes isn't what the caller wants since
1645 		 * get_order(0) returns a funny result. Just warn and terminate
1646 		 * early.
1647 		 */
1648 		return NULL;
1649 	}
1650 	order = get_order(size);
1651 
1652 	rcu_read_lock();
1653 	vbq = &get_cpu_var(vmap_block_queue);
1654 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1655 		unsigned long pages_off;
1656 
1657 		spin_lock(&vb->lock);
1658 		if (vb->free < (1UL << order)) {
1659 			spin_unlock(&vb->lock);
1660 			continue;
1661 		}
1662 
1663 		pages_off = VMAP_BBMAP_BITS - vb->free;
1664 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1665 		vb->free -= 1UL << order;
1666 		if (vb->free == 0) {
1667 			spin_lock(&vbq->lock);
1668 			list_del_rcu(&vb->free_list);
1669 			spin_unlock(&vbq->lock);
1670 		}
1671 
1672 		spin_unlock(&vb->lock);
1673 		break;
1674 	}
1675 
1676 	put_cpu_var(vmap_block_queue);
1677 	rcu_read_unlock();
1678 
1679 	/* Allocate new block if nothing was found */
1680 	if (!vaddr)
1681 		vaddr = new_vmap_block(order, gfp_mask);
1682 
1683 	return vaddr;
1684 }
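/*
 * Note: allocations are carved from the front of a block, since
 * pages_off = VMAP_BBMAP_BITS - vb->free. For example, in a block that
 * has already handed out 4 pages (vb->free = VMAP_BBMAP_BITS - 4), the
 * next request starts at page offset 4; freed space is not reused until
 * the whole block turns dirty and is released.
 */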
1685 
1686 static void vb_free(unsigned long addr, unsigned long size)
1687 {
1688 	unsigned long offset;
1689 	unsigned long vb_idx;
1690 	unsigned int order;
1691 	struct vmap_block *vb;
1692 
1693 	BUG_ON(offset_in_page(size));
1694 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1695 
1696 	flush_cache_vunmap(addr, addr + size);
1697 
1698 	order = get_order(size);
1699 
1700 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
1701 
1702 	vb_idx = addr_to_vb_idx(addr);
1703 	rcu_read_lock();
1704 	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
1705 	rcu_read_unlock();
1706 	BUG_ON(!vb);
1707 
1708 	unmap_kernel_range_noflush(addr, size);
1709 
1710 	if (debug_pagealloc_enabled_static())
1711 		flush_tlb_kernel_range(addr, addr + size);
1712 
1713 	spin_lock(&vb->lock);
1714 
1715 	/* Expand dirty range */
1716 	vb->dirty_min = min(vb->dirty_min, offset);
1717 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1718 
1719 	vb->dirty += 1UL << order;
1720 	if (vb->dirty == VMAP_BBMAP_BITS) {
1721 		BUG_ON(vb->free);
1722 		spin_unlock(&vb->lock);
1723 		free_vmap_block(vb);
1724 	} else
1725 		spin_unlock(&vb->lock);
1726 }
1727 
1728 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
1729 {
1730 	int cpu;
1731 
1732 	if (unlikely(!vmap_initialized))
1733 		return;
1734 
1735 	might_sleep();
1736 
1737 	for_each_possible_cpu(cpu) {
1738 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1739 		struct vmap_block *vb;
1740 
1741 		rcu_read_lock();
1742 		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1743 			spin_lock(&vb->lock);
1744 			if (vb->dirty) {
1745 				unsigned long va_start = vb->va->va_start;
1746 				unsigned long s, e;
1747 
1748 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
1749 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
1750 
1751 				start = min(s, start);
1752 				end   = max(e, end);
1753 
1754 				flush = 1;
1755 			}
1756 			spin_unlock(&vb->lock);
1757 		}
1758 		rcu_read_unlock();
1759 	}
1760 
1761 	mutex_lock(&vmap_purge_lock);
1762 	purge_fragmented_blocks_allcpus();
1763 	if (!__purge_vmap_area_lazy(start, end) && flush)
1764 		flush_tlb_kernel_range(start, end);
1765 	mutex_unlock(&vmap_purge_lock);
1766 }
1767 
1768 /**
1769  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1770  *
1771  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1772  * to amortize TLB flushing overheads. What this means is that any page you
1773  * have now, may, in a former life, have been mapped into kernel virtual
1774  * address by the vmap layer and so there might be some CPUs with TLB entries
1775  * still referencing that page (additional to the regular 1:1 kernel mapping).
1776  *
1777  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1778  * be sure that none of the pages we have control over will have any aliases
1779  * from the vmap layer.
1780  */
1781 void vm_unmap_aliases(void)
1782 {
1783 	unsigned long start = ULONG_MAX, end = 0;
1784 	int flush = 0;
1785 
1786 	_vm_unmap_aliases(start, end, flush);
1787 }
1788 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1789 
1790 /**
1791  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1792  * @mem: the pointer returned by vm_map_ram
1793  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1794  */
1795 void vm_unmap_ram(const void *mem, unsigned int count)
1796 {
1797 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1798 	unsigned long addr = (unsigned long)mem;
1799 	struct vmap_area *va;
1800 
1801 	might_sleep();
1802 	BUG_ON(!addr);
1803 	BUG_ON(addr < VMALLOC_START);
1804 	BUG_ON(addr > VMALLOC_END);
1805 	BUG_ON(!PAGE_ALIGNED(addr));
1806 
1807 	kasan_poison_vmalloc(mem, size);
1808 
1809 	if (likely(count <= VMAP_MAX_ALLOC)) {
1810 		debug_check_no_locks_freed(mem, size);
1811 		vb_free(addr, size);
1812 		return;
1813 	}
1814 
1815 	va = find_vmap_area(addr);
1816 	BUG_ON(!va);
1817 	debug_check_no_locks_freed((void *)va->va_start,
1818 				    (va->va_end - va->va_start));
1819 	free_unmap_vmap_area(va);
1820 }
1821 EXPORT_SYMBOL(vm_unmap_ram);
1822 
1823 /**
1824  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1825  * @pages: an array of pointers to the pages to be mapped
1826  * @count: number of pages
1827  * @node: prefer to allocate data structures on this node
1829  *
1830  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
1831  * faster than vmap(), so it's good.  But if you mix long-life and short-life
1832  * objects with vm_map_ram(), it could consume lots of address space through
1833  * fragmentation (especially on a 32-bit machine).  You could eventually see
1834  * failures.  Please use this function for short-lived objects.
1835  *
1836  * Returns: a pointer to the address that has been mapped, or %NULL on failure
1837  */
1838 void *vm_map_ram(struct page **pages, unsigned int count, int node)
1839 {
1840 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1841 	unsigned long addr;
1842 	void *mem;
1843 
1844 	if (likely(count <= VMAP_MAX_ALLOC)) {
1845 		mem = vb_alloc(size, GFP_KERNEL);
1846 		if (IS_ERR(mem))
1847 			return NULL;
1848 		addr = (unsigned long)mem;
1849 	} else {
1850 		struct vmap_area *va;
1851 		va = alloc_vmap_area(size, PAGE_SIZE,
1852 				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1853 		if (IS_ERR(va))
1854 			return NULL;
1855 
1856 		addr = va->va_start;
1857 		mem = (void *)addr;
1858 	}
1859 
1860 	kasan_unpoison_vmalloc(mem, size);
1861 
1862 	if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
1863 		vm_unmap_ram(mem, count);
1864 		return NULL;
1865 	}
1866 	return mem;
1867 }
1868 EXPORT_SYMBOL(vm_map_ram);
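/*
 * Minimal usage sketch (hypothetical pages array, illustrative only):
 *
 *	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *	if (!va)
 *		return -ENOMEM;
 *	... access the linearly mapped pages at va ...
 *	vm_unmap_ram(va, nr_pages);
 *
 * As noted above, the fast per-cpu path applies when count is at most
 * VMAP_MAX_ALLOC pages; prefer it for short-lived mappings.
 */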
1869 
1870 static struct vm_struct *vmlist __initdata;
1871 
1872 /**
1873  * vm_area_add_early - add vmap area early during boot
1874  * @vm: vm_struct to add
1875  *
1876  * This function is used to add fixed kernel vm area to vmlist before
1877  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
1878  * should contain proper values and the other fields should be zero.
1879  *
1880  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1881  */
1882 void __init vm_area_add_early(struct vm_struct *vm)
1883 {
1884 	struct vm_struct *tmp, **p;
1885 
1886 	BUG_ON(vmap_initialized);
1887 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1888 		if (tmp->addr >= vm->addr) {
1889 			BUG_ON(tmp->addr < vm->addr + vm->size);
1890 			break;
1891 		} else
1892 			BUG_ON(tmp->addr + tmp->size > vm->addr);
1893 	}
1894 	vm->next = *p;
1895 	*p = vm;
1896 }
1897 
1898 /**
1899  * vm_area_register_early - register vmap area early during boot
1900  * @vm: vm_struct to register
1901  * @align: requested alignment
1902  *
1903  * This function is used to register kernel vm area before
1904  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
1905  * proper values on entry and other fields should be zero.  On return,
1906  * vm->addr contains the allocated address.
1907  *
1908  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1909  */
1910 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1911 {
1912 	static size_t vm_init_off __initdata;
1913 	unsigned long addr;
1914 
1915 	addr = ALIGN(VMALLOC_START + vm_init_off, align);
1916 	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1917 
1918 	vm->addr = (void *)addr;
1919 
1920 	vm_area_add_early(vm);
1921 }
1922 
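/*
 * Illustrative sketch (hypothetical boot-time user, not taken from the
 * tree): reserving a fixed-size region before vmalloc_init() runs.  The
 * vm_struct must stay allocated, because vmalloc_init() keeps a pointer to
 * it; the size and flags below are assumptions for illustration only.
 */
static struct vm_struct example_early_vm = {
	.flags	= VM_ALLOC,
	.size	= 1UL << 20,	/* 1 MiB */
};

static void __init example_reserve_early_area(void)
{
	vm_area_register_early(&example_early_vm, PAGE_SIZE);
	/* example_early_vm.addr now holds the reserved virtual address. */
}
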
1923 static void vmap_init_free_space(void)
1924 {
1925 	unsigned long vmap_start = 1;
1926 	const unsigned long vmap_end = ULONG_MAX;
1927 	struct vmap_area *busy, *free;
1928 
1929 	/*
1930 	 *     B     F     B     B     B     F
1931 	 * -|-----|.....|-----|-----|-----|.....|-
1932 	 *  |           The KVA space           |
1933 	 *  |<--------------------------------->|
1934 	 */
1935 	list_for_each_entry(busy, &vmap_area_list, list) {
1936 		if (busy->va_start - vmap_start > 0) {
1937 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1938 			if (!WARN_ON_ONCE(!free)) {
1939 				free->va_start = vmap_start;
1940 				free->va_end = busy->va_start;
1941 
1942 				insert_vmap_area_augment(free, NULL,
1943 					&free_vmap_area_root,
1944 						&free_vmap_area_list);
1945 			}
1946 		}
1947 
1948 		vmap_start = busy->va_end;
1949 	}
1950 
1951 	if (vmap_end - vmap_start > 0) {
1952 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1953 		if (!WARN_ON_ONCE(!free)) {
1954 			free->va_start = vmap_start;
1955 			free->va_end = vmap_end;
1956 
1957 			insert_vmap_area_augment(free, NULL,
1958 				&free_vmap_area_root,
1959 					&free_vmap_area_list);
1960 		}
1961 	}
1962 }
1963 
1964 void __init vmalloc_init(void)
1965 {
1966 	struct vmap_area *va;
1967 	struct vm_struct *tmp;
1968 	int i;
1969 
1970 	/*
1971 	 * Create the cache for vmap_area objects.
1972 	 */
1973 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
1974 
1975 	for_each_possible_cpu(i) {
1976 		struct vmap_block_queue *vbq;
1977 		struct vfree_deferred *p;
1978 
1979 		vbq = &per_cpu(vmap_block_queue, i);
1980 		spin_lock_init(&vbq->lock);
1981 		INIT_LIST_HEAD(&vbq->free);
1982 		p = &per_cpu(vfree_deferred, i);
1983 		init_llist_head(&p->list);
1984 		INIT_WORK(&p->wq, free_work);
1985 	}
1986 
1987 	/* Import existing vmlist entries. */
1988 	for (tmp = vmlist; tmp; tmp = tmp->next) {
1989 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1990 		if (WARN_ON_ONCE(!va))
1991 			continue;
1992 
1993 		va->va_start = (unsigned long)tmp->addr;
1994 		va->va_end = va->va_start + tmp->size;
1995 		va->vm = tmp;
1996 		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1997 	}
1998 
1999 	/*
2000 	 * Now we can initialize a free vmap space.
2001 	 */
2002 	vmap_init_free_space();
2003 	vmap_initialized = true;
2004 }
2005 
2006 /**
2007  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
2008  * @addr: start of the VM area to unmap
2009  * @size: size of the VM area to unmap
2010  *
2011  * Similar to unmap_kernel_range_noflush() but flushes the vcache before
2012  * the unmapping and the TLB after.
2013  */
2014 void unmap_kernel_range(unsigned long addr, unsigned long size)
2015 {
2016 	unsigned long end = addr + size;
2017 
2018 	flush_cache_vunmap(addr, end);
2019 	unmap_kernel_range_noflush(addr, size);
2020 	flush_tlb_kernel_range(addr, end);
2021 }
2022 
2023 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2024 	struct vmap_area *va, unsigned long flags, const void *caller)
2025 {
2026 	vm->flags = flags;
2027 	vm->addr = (void *)va->va_start;
2028 	vm->size = va->va_end - va->va_start;
2029 	vm->caller = caller;
2030 	va->vm = vm;
2031 }
2032 
2033 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2034 			      unsigned long flags, const void *caller)
2035 {
2036 	spin_lock(&vmap_area_lock);
2037 	setup_vmalloc_vm_locked(vm, va, flags, caller);
2038 	spin_unlock(&vmap_area_lock);
2039 }
2040 
2041 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2042 {
2043 	/*
2044 	 * Before removing VM_UNINITIALIZED,
2045 	 * we should make sure that vm has proper values.
2046 	 * Pair with smp_rmb() in show_numa_info().
2047 	 */
2048 	smp_wmb();
2049 	vm->flags &= ~VM_UNINITIALIZED;
2050 }
2051 
2052 static struct vm_struct *__get_vm_area_node(unsigned long size,
2053 		unsigned long align, unsigned long flags, unsigned long start,
2054 		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
2055 {
2056 	struct vmap_area *va;
2057 	struct vm_struct *area;
2058 	unsigned long requested_size = size;
2059 
2060 	BUG_ON(in_interrupt());
2061 	size = PAGE_ALIGN(size);
2062 	if (unlikely(!size))
2063 		return NULL;
2064 
2065 	if (flags & VM_IOREMAP)
2066 		align = 1ul << clamp_t(int, get_count_order_long(size),
2067 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2068 
2069 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2070 	if (unlikely(!area))
2071 		return NULL;
2072 
2073 	if (!(flags & VM_NO_GUARD))
2074 		size += PAGE_SIZE;
2075 
2076 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2077 	if (IS_ERR(va)) {
2078 		kfree(area);
2079 		return NULL;
2080 	}
2081 
2082 	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2083 
2084 	setup_vmalloc_vm(area, va, flags, caller);
2085 
2086 	return area;
2087 }
2088 
2089 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2090 				       unsigned long start, unsigned long end,
2091 				       const void *caller)
2092 {
2093 	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2094 				  GFP_KERNEL, caller);
2095 }
2096 
2097 /**
2098  * get_vm_area - reserve a contiguous kernel virtual area
2099  * @size:	 size of the area
2100  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
2101  *
2102  * Search an area of @size in the kernel virtual mapping area,
2103  * and reserve it for our purposes.  Returns the area descriptor
2104  * on success or %NULL on failure.
2105  *
2106  * Return: the area descriptor on success or %NULL on failure.
2107  */
2108 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2109 {
2110 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2111 				  NUMA_NO_NODE, GFP_KERNEL,
2112 				  __builtin_return_address(0));
2113 }
2114 
2115 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2116 				const void *caller)
2117 {
2118 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2119 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2120 }
2121 
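/*
 * Illustrative sketch (hypothetical helper, not taken from the tree): an
 * ioremap()-style user reserving kernel virtual space with get_vm_area().
 * A real implementation would install page table entries for the physical
 * range before handing the address out, and release the window again with
 * free_vm_area().
 */
static void __iomem *example_reserve_io_window(size_t len)
{
	struct vm_struct *area = get_vm_area(len, VM_IOREMAP);

	if (!area)
		return NULL;

	/* area->size is @len rounded up to pages, plus one guard page. */
	return (void __iomem *)area->addr;
}
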
2122 /**
2123  * find_vm_area - find a contiguous kernel virtual area
2124  * @addr:	  base address
2125  *
2126  * Search for the kernel VM area starting at @addr, and return it.
2127  * It is up to the caller to do all required locking to keep the returned
2128  * pointer valid.
2129  *
2130  * Return: pointer to the found area or %NULL on failure
2131  */
2132 struct vm_struct *find_vm_area(const void *addr)
2133 {
2134 	struct vmap_area *va;
2135 
2136 	va = find_vmap_area((unsigned long)addr);
2137 	if (!va)
2138 		return NULL;
2139 
2140 	return va->vm;
2141 }
2142 
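/*
 * Illustrative sketch (hypothetical helper, not taken from the tree):
 * looking up the vm_struct behind a vmalloc address, e.g. to report how
 * much virtual space an allocation occupies.
 */
static unsigned long example_vmalloc_footprint(const void *addr)
{
	struct vm_struct *area = find_vm_area(addr);

	/* area->size includes the guard page added by __get_vm_area_node(). */
	return area ? area->size : 0;
}
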
2143 /**
2144  * remove_vm_area - find and remove a contiguous kernel virtual area
2145  * @addr:	    base address
2146  *
2147  * Search for the kernel VM area starting at @addr, and remove it.
2148  * This function returns the found VM area, but using it is NOT safe
2149  * on SMP machines, except for its size or flags.
2150  *
2151  * Return: pointer to the found area or %NULL on failure
2152  */
2153 struct vm_struct *remove_vm_area(const void *addr)
2154 {
2155 	struct vmap_area *va;
2156 
2157 	might_sleep();
2158 
2159 	spin_lock(&vmap_area_lock);
2160 	va = __find_vmap_area((unsigned long)addr);
2161 	if (va && va->vm) {
2162 		struct vm_struct *vm = va->vm;
2163 
2164 		va->vm = NULL;
2165 		spin_unlock(&vmap_area_lock);
2166 
2167 		kasan_free_shadow(vm);
2168 		free_unmap_vmap_area(va);
2169 
2170 		return vm;
2171 	}
2172 
2173 	spin_unlock(&vmap_area_lock);
2174 	return NULL;
2175 }
2176 
2177 static inline void set_area_direct_map(const struct vm_struct *area,
2178 				       int (*set_direct_map)(struct page *page))
2179 {
2180 	int i;
2181 
2182 	for (i = 0; i < area->nr_pages; i++)
2183 		if (page_address(area->pages[i]))
2184 			set_direct_map(area->pages[i]);
2185 }
2186 
2187 /* Handle removing and resetting vm mappings related to the vm_struct. */
2188 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2189 {
2190 	unsigned long start = ULONG_MAX, end = 0;
2191 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2192 	int flush_dmap = 0;
2193 	int i;
2194 
2195 	remove_vm_area(area->addr);
2196 
2197 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2198 	if (!flush_reset)
2199 		return;
2200 
2201 	/*
2202 	 * If not deallocating pages, just do the flush of the VM area and
2203 	 * return.
2204 	 */
2205 	if (!deallocate_pages) {
2206 		vm_unmap_aliases();
2207 		return;
2208 	}
2209 
2210 	/*
2211 	 * If execution gets here, flush the vm mapping and reset the direct
2212 	 * map. Find the start and end range of the direct mappings to make sure
2213 	 * the vm_unmap_aliases() flush includes the direct map.
2214 	 */
2215 	for (i = 0; i < area->nr_pages; i++) {
2216 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
2217 		if (addr) {
2218 			start = min(addr, start);
2219 			end = max(addr + PAGE_SIZE, end);
2220 			flush_dmap = 1;
2221 		}
2222 	}
2223 
2224 	/*
2225 	 * Set direct map to something invalid so that it won't be cached if
2226 	 * there are any accesses after the TLB flush, then flush the TLB and
2227 	 * reset the direct map permissions to the default.
2228 	 */
2229 	set_area_direct_map(area, set_direct_map_invalid_noflush);
2230 	_vm_unmap_aliases(start, end, flush_dmap);
2231 	set_area_direct_map(area, set_direct_map_default_noflush);
2232 }
2233 
2234 static void __vunmap(const void *addr, int deallocate_pages)
2235 {
2236 	struct vm_struct *area;
2237 
2238 	if (!addr)
2239 		return;
2240 
2241 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2242 			addr))
2243 		return;
2244 
2245 	area = find_vm_area(addr);
2246 	if (unlikely(!area)) {
2247 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2248 				addr);
2249 		return;
2250 	}
2251 
2252 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2253 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2254 
2255 	kasan_poison_vmalloc(area->addr, area->size);
2256 
2257 	vm_remove_mappings(area, deallocate_pages);
2258 
2259 	if (deallocate_pages) {
2260 		int i;
2261 
2262 		for (i = 0; i < area->nr_pages; i++) {
2263 			struct page *page = area->pages[i];
2264 
2265 			BUG_ON(!page);
2266 			__free_pages(page, 0);
2267 		}
2268 		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2269 
2270 		kvfree(area->pages);
2271 	}
2272 
2273 	kfree(area);
2274 	return;
2275 }
2276 
2277 static inline void __vfree_deferred(const void *addr)
2278 {
2279 	/*
2280 	 * Use raw_cpu_ptr() because this can be called from preemptible
2281 	 * context. Preemption is absolutely fine here, because the llist_add()
2282 	 * implementation is lockless, so it works even if we are adding to
2283  * another CPU's list.  schedule_work() should be fine with this too.
2284 	 */
2285 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2286 
2287 	if (llist_add((struct llist_node *)addr, &p->list))
2288 		schedule_work(&p->wq);
2289 }
2290 
2291 /**
2292  * vfree_atomic - release memory allocated by vmalloc()
2293  * @addr:	  memory base address
2294  *
2295  * This one is just like vfree() but can be called in any atomic context
2296  * except NMIs.
2297  */
2298 void vfree_atomic(const void *addr)
2299 {
2300 	BUG_ON(in_nmi());
2301 
2302 	kmemleak_free(addr);
2303 
2304 	if (!addr)
2305 		return;
2306 	__vfree_deferred(addr);
2307 }
2308 
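/*
 * Illustrative sketch (hypothetical caller, not taken from the tree):
 * releasing a vmalloc'ed buffer while a spinlock is held, where plain
 * vfree() could not be used because it may sleep.
 */
static void example_free_under_lock(spinlock_t *lock, void *buf)
{
	spin_lock(lock);
	/* Defers the unmap and page freeing to the vfree_deferred workqueue. */
	vfree_atomic(buf);
	spin_unlock(lock);
}
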
2309 static void __vfree(const void *addr)
2310 {
2311 	if (unlikely(in_interrupt()))
2312 		__vfree_deferred(addr);
2313 	else
2314 		__vunmap(addr, 1);
2315 }
2316 
2317 /**
2318  * vfree - release memory allocated by vmalloc()
2319  * @addr:  memory base address
2320  *
2321  * Free the virtually contiguous memory area starting at @addr, as
2322  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
2323  * NULL, no operation is performed.
2324  *
2325  * Must not be called in NMI context (strictly speaking, only if we don't
2326  * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2327  * conventions for vfree() arch-dependent would be a really bad idea)
2328  *
2329  * May sleep if called *not* from interrupt context.
2330  *
2331  * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
2332  */
2333 void vfree(const void *addr)
2334 {
2335 	BUG_ON(in_nmi());
2336 
2337 	kmemleak_free(addr);
2338 
2339 	might_sleep_if(!in_interrupt());
2340 
2341 	if (!addr)
2342 		return;
2343 
2344 	__vfree(addr);
2345 }
2346 EXPORT_SYMBOL(vfree);
2347 
2348 /**
2349  * vunmap - release virtual mapping obtained by vmap()
2350  * @addr:   memory base address
2351  *
2352  * Free the virtually contiguous memory area starting at @addr,
2353  * which was created from the page array passed to vmap().
2354  *
2355  * Must not be called in interrupt context.
2356  */
2357 void vunmap(const void *addr)
2358 {
2359 	BUG_ON(in_interrupt());
2360 	might_sleep();
2361 	if (addr)
2362 		__vunmap(addr, 0);
2363 }
2364 EXPORT_SYMBOL(vunmap);
2365 
2366 /**
2367  * vmap - map an array of pages into virtually contiguous space
2368  * @pages: array of page pointers
2369  * @count: number of pages to map
2370  * @flags: vm_area->flags
2371  * @prot: page protection for the mapping
2372  *
2373  * Maps @count pages from @pages into contiguous kernel virtual
2374  * space.
2375  *
2376  * Return: the address of the area or %NULL on failure
2377  */
2378 void *vmap(struct page **pages, unsigned int count,
2379 	   unsigned long flags, pgprot_t prot)
2380 {
2381 	struct vm_struct *area;
2382 	unsigned long size;		/* In bytes */
2383 
2384 	might_sleep();
2385 
2386 	if (count > totalram_pages())
2387 		return NULL;
2388 
2389 	size = (unsigned long)count << PAGE_SHIFT;
2390 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2391 	if (!area)
2392 		return NULL;
2393 
2394 	if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
2395 			pages) < 0) {
2396 		vunmap(area->addr);
2397 		return NULL;
2398 	}
2399 
2400 	return area->addr;
2401 }
2402 EXPORT_SYMBOL(vmap);
2403 
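/*
 * Illustrative sketch (hypothetical helper, not taken from the tree):
 * gathering individually allocated pages into one virtually contiguous
 * range with vmap(), then tearing the mapping down again with vunmap().
 */
static void example_vmap_roundtrip(void)
{
	struct page *pages[2] = { NULL, NULL };
	void *va;
	int i;

	for (i = 0; i < 2; i++)
		pages[i] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto out_free;

	/* VM_MAP marks the area as a vmap()ed region in /proc/vmallocinfo. */
	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (va) {
		memset(va, 0, 2 * PAGE_SIZE);
		vunmap(va);	/* drops the mapping; the pages stay allocated */
	}
out_free:
	for (i = 0; i < 2; i++)
		if (pages[i])
			__free_page(pages[i]);
}
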
2404 static void *__vmalloc_node(unsigned long size, unsigned long align,
2405 			    gfp_t gfp_mask, pgprot_t prot,
2406 			    int node, const void *caller);
2407 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2408 				 pgprot_t prot, int node)
2409 {
2410 	struct page **pages;
2411 	unsigned int nr_pages, array_size, i;
2412 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2413 	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
2414 	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
2415 					0 :
2416 					__GFP_HIGHMEM;
2417 
2418 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
2419 	array_size = (nr_pages * sizeof(struct page *));
2420 
2421 	/* Please note that the recursion is strictly bounded. */
2422 	if (array_size > PAGE_SIZE) {
2423 		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
2424 				PAGE_KERNEL, node, area->caller);
2425 	} else {
2426 		pages = kmalloc_node(array_size, nested_gfp, node);
2427 	}
2428 
2429 	if (!pages) {
2430 		remove_vm_area(area->addr);
2431 		kfree(area);
2432 		return NULL;
2433 	}
2434 
2435 	area->pages = pages;
2436 	area->nr_pages = nr_pages;
2437 
2438 	for (i = 0; i < area->nr_pages; i++) {
2439 		struct page *page;
2440 
2441 		if (node == NUMA_NO_NODE)
2442 			page = alloc_page(alloc_mask|highmem_mask);
2443 		else
2444 			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
2445 
2446 		if (unlikely(!page)) {
2447 			/* Successfully allocated i pages, free them in __vunmap() */
2448 			area->nr_pages = i;
2449 			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2450 			goto fail;
2451 		}
2452 		area->pages[i] = page;
2453 		if (gfpflags_allow_blocking(gfp_mask))
2454 			cond_resched();
2455 	}
2456 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2457 
2458 	if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
2459 			prot, pages) < 0)
2460 		goto fail;
2461 
2462 	return area->addr;
2463 
2464 fail:
2465 	warn_alloc(gfp_mask, NULL,
2466 			  "vmalloc: allocation failure, allocated %ld of %ld bytes",
2467 			  (area->nr_pages*PAGE_SIZE), area->size);
2468 	__vfree(area->addr);
2469 	return NULL;
2470 }
2471 
2472 /**
2473  * __vmalloc_node_range - allocate virtually contiguous memory
2474  * @size:		  allocation size
2475  * @align:		  desired alignment
2476  * @start:		  vm area range start
2477  * @end:		  vm area range end
2478  * @gfp_mask:		  flags for the page level allocator
2479  * @prot:		  protection mask for the allocated pages
2480  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
2481  * @node:		  node to use for allocation or NUMA_NO_NODE
2482  * @caller:		  caller's return address
2483  *
2484  * Allocate enough pages to cover @size from the page level
2485  * allocator with @gfp_mask flags.  Map them into contiguous
2486  * kernel virtual space, using a pagetable protection of @prot.
2487  *
2488  * Return: the address of the area or %NULL on failure
2489  */
2490 void *__vmalloc_node_range(unsigned long size, unsigned long align,
2491 			unsigned long start, unsigned long end, gfp_t gfp_mask,
2492 			pgprot_t prot, unsigned long vm_flags, int node,
2493 			const void *caller)
2494 {
2495 	struct vm_struct *area;
2496 	void *addr;
2497 	unsigned long real_size = size;
2498 
2499 	size = PAGE_ALIGN(size);
2500 	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
2501 		goto fail;
2502 
2503 	area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
2504 				vm_flags, start, end, node, gfp_mask, caller);
2505 	if (!area)
2506 		goto fail;
2507 
2508 	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
2509 	if (!addr)
2510 		return NULL;
2511 
2512 	/*
2513 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
2514 	 * flag. It means that vm_struct is not fully initialized.
2515 	 * Now, it is fully initialized, so remove this flag here.
2516 	 */
2517 	clear_vm_uninitialized_flag(area);
2518 
2519 	kmemleak_vmalloc(area, size, gfp_mask);
2520 
2521 	return addr;
2522 
2523 fail:
2524 	warn_alloc(gfp_mask, NULL,
2525 			  "vmalloc: allocation failure: %lu bytes", real_size);
2526 	return NULL;
2527 }
2528 
2529 /*
2530  * This is only for performance analysis of vmalloc and stress purposes.
2531  * It is required by the vmalloc test module; do not use it for anything
2532  * else.
2533  */
2534 #ifdef CONFIG_TEST_VMALLOC_MODULE
2535 EXPORT_SYMBOL_GPL(__vmalloc_node_range);
2536 #endif
2537 
2538 /**
2539  * __vmalloc_node - allocate virtually contiguous memory
2540  * @size:	    allocation size
2541  * @align:	    desired alignment
2542  * @gfp_mask:	    flags for the page level allocator
2543  * @prot:	    protection mask for the allocated pages
2544  * @node:	    node to use for allocation or NUMA_NO_NODE
2545  * @caller:	    caller's return address
2546  *
2547  * Allocate enough pages to cover @size from the page level
2548  * allocator with @gfp_mask flags.  Map them into contiguous
2549  * kernel virtual space, using a pagetable protection of @prot.
2550  *
2551  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2552  * and __GFP_NOFAIL are not supported
2553  *
2554  * Any use of gfp flags outside of GFP_KERNEL should be discussed
2555  * with the mm people first.
2556  *
2557  * Return: pointer to the allocated memory or %NULL on error
2558  */
2559 static void *__vmalloc_node(unsigned long size, unsigned long align,
2560 			    gfp_t gfp_mask, pgprot_t prot,
2561 			    int node, const void *caller)
2562 {
2563 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
2564 				gfp_mask, prot, 0, node, caller);
2565 }
2566 
2567 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
2568 {
2569 	return __vmalloc_node(size, 1, gfp_mask, PAGE_KERNEL, NUMA_NO_NODE,
2570 				__builtin_return_address(0));
2571 }
2572 EXPORT_SYMBOL(__vmalloc);
2573 
2574 static inline void *__vmalloc_node_flags(unsigned long size,
2575 					int node, gfp_t flags)
2576 {
2577 	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
2578 					node, __builtin_return_address(0));
2579 }
2580 
2581 
2582 void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
2583 				  void *caller)
2584 {
2585 	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
2586 }
2587 
2588 /**
2589  * vmalloc - allocate virtually contiguous memory
2590  * @size:    allocation size
2591  *
2592  * Allocate enough pages to cover @size from the page level
2593  * allocator and map them into contiguous kernel virtual space.
2594  *
2595  * For tight control over page level allocator and protection flags
2596  * use __vmalloc() instead.
2597  *
2598  * Return: pointer to the allocated memory or %NULL on error
2599  */
2600 void *vmalloc(unsigned long size)
2601 {
2602 	return __vmalloc_node_flags(size, NUMA_NO_NODE,
2603 				    GFP_KERNEL);
2604 }
2605 EXPORT_SYMBOL(vmalloc);
2606 
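/*
 * Illustrative sketch (hypothetical caller, not taken from the tree): the
 * common vmalloc()/vfree() pattern for a buffer too large for kmalloc().
 * The size is an arbitrary example value.
 */
static int example_big_buffer(void)
{
	size_t size = 4UL << 20;	/* 4 MiB, virtually contiguous only */
	void *buf = vmalloc(size);

	if (!buf)
		return -ENOMEM;

	memset(buf, 0, size);		/* or use vzalloc() to get zeroed memory */
	vfree(buf);
	return 0;
}
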
2607 /**
2608  * vzalloc - allocate virtually contiguous memory with zero fill
2609  * @size:    allocation size
2610  *
2611  * Allocate enough pages to cover @size from the page level
2612  * allocator and map them into contiguous kernel virtual space.
2613  * The memory allocated is set to zero.
2614  *
2615  * For tight control over page level allocator and protection flags
2616  * use __vmalloc() instead.
2617  *
2618  * Return: pointer to the allocated memory or %NULL on error
2619  */
2620 void *vzalloc(unsigned long size)
2621 {
2622 	return __vmalloc_node_flags(size, NUMA_NO_NODE,
2623 				GFP_KERNEL | __GFP_ZERO);
2624 }
2625 EXPORT_SYMBOL(vzalloc);
2626 
2627 /**
2628  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2629  * @size: allocation size
2630  *
2631  * The resulting memory area is zeroed so it can be mapped to userspace
2632  * without leaking data.
2633  *
2634  * Return: pointer to the allocated memory or %NULL on error
2635  */
2636 void *vmalloc_user(unsigned long size)
2637 {
2638 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2639 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
2640 				    VM_USERMAP, NUMA_NO_NODE,
2641 				    __builtin_return_address(0));
2642 }
2643 EXPORT_SYMBOL(vmalloc_user);
2644 
2645 /**
2646  * vmalloc_node - allocate memory on a specific node
2647  * @size:	  allocation size
2648  * @node:	  numa node
2649  *
2650  * Allocate enough pages to cover @size from the page level
2651  * allocator and map them into contiguous kernel virtual space.
2652  *
2653  * For tight control over page level allocator and protection flags
2654  * use __vmalloc() instead.
2655  *
2656  * Return: pointer to the allocated memory or %NULL on error
2657  */
2658 void *vmalloc_node(unsigned long size, int node)
2659 {
2660 	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
2661 					node, __builtin_return_address(0));
2662 }
2663 EXPORT_SYMBOL(vmalloc_node);
2664 
2665 /**
2666  * vzalloc_node - allocate memory on a specific node with zero fill
2667  * @size:	allocation size
2668  * @node:	numa node
2669  *
2670  * Allocate enough pages to cover @size from the page level
2671  * allocator and map them into contiguous kernel virtual space.
2672  * The memory allocated is set to zero.
2673  *
2674  * For tight control over page level allocator and protection flags
2675  * use __vmalloc_node() instead.
2676  *
2677  * Return: pointer to the allocated memory or %NULL on error
2678  */
2679 void *vzalloc_node(unsigned long size, int node)
2680 {
2681 	return __vmalloc_node_flags(size, node,
2682 			 GFP_KERNEL | __GFP_ZERO);
2683 }
2684 EXPORT_SYMBOL(vzalloc_node);
2685 
2686 /**
2687  * vmalloc_user_node_flags - allocate memory for userspace on a specific node
2688  * @size: allocation size
2689  * @node: numa node
2690  * @flags: flags for the page level allocator
2691  *
2692  * The resulting memory area is zeroed so it can be mapped to userspace
2693  * without leaking data.
2694  *
2695  * Return: pointer to the allocated memory or %NULL on error
2696  */
2697 void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags)
2698 {
2699 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2700 				    flags | __GFP_ZERO, PAGE_KERNEL,
2701 				    VM_USERMAP, node,
2702 				    __builtin_return_address(0));
2703 }
2704 EXPORT_SYMBOL(vmalloc_user_node_flags);
2705 
2706 /**
2707  * vmalloc_exec - allocate virtually contiguous, executable memory
2708  * @size:	  allocation size
2709  *
2710  * Kernel-internal function to allocate enough pages to cover @size from
2711  * the page level allocator and map them into contiguous and
2712  * executable kernel virtual space.
2713  *
2714  * For tight control over page level allocator and protection flags
2715  * use __vmalloc() instead.
2716  *
2717  * Return: pointer to the allocated memory or %NULL on error
2718  */
2719 void *vmalloc_exec(unsigned long size)
2720 {
2721 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2722 			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2723 			NUMA_NO_NODE, __builtin_return_address(0));
2724 }
2725 
2726 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2727 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2728 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2729 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
2730 #else
2731 /*
2732  * 64b systems should always have either DMA or DMA32 zones. For others
2733  * GFP_DMA32 should do the right thing and use the normal zone.
2734  */
2735 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2736 #endif
2737 
2738 /**
2739  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2740  * @size:	allocation size
2741  *
2742  * Allocate enough 32bit PA addressable pages to cover @size from the
2743  * page level allocator and map them into contiguous kernel virtual space.
2744  *
2745  * Return: pointer to the allocated memory or %NULL on error
2746  */
2747 void *vmalloc_32(unsigned long size)
2748 {
2749 	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
2750 			      NUMA_NO_NODE, __builtin_return_address(0));
2751 }
2752 EXPORT_SYMBOL(vmalloc_32);
2753 
2754 /**
2755  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
2756  * @size:	     allocation size
2757  *
2758  * The resulting memory area is 32bit addressable and zeroed so it can be
2759  * mapped to userspace without leaking data.
2760  *
2761  * Return: pointer to the allocated memory or %NULL on error
2762  */
2763 void *vmalloc_32_user(unsigned long size)
2764 {
2765 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2766 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2767 				    VM_USERMAP, NUMA_NO_NODE,
2768 				    __builtin_return_address(0));
2769 }
2770 EXPORT_SYMBOL(vmalloc_32_user);
2771 
2772 /*
2773  * Small helper routine: copy contents from addr into buf.
2774  * If a page is not present, fill the corresponding bytes with zero.
2775  */
2776 
2777 static int aligned_vread(char *buf, char *addr, unsigned long count)
2778 {
2779 	struct page *p;
2780 	int copied = 0;
2781 
2782 	while (count) {
2783 		unsigned long offset, length;
2784 
2785 		offset = offset_in_page(addr);
2786 		length = PAGE_SIZE - offset;
2787 		if (length > count)
2788 			length = count;
2789 		p = vmalloc_to_page(addr);
2790 		/*
2791 		 * To do safe access to this _mapped_ area, we need a
2792 		 * lock. But taking a lock here would add vmalloc()/vfree()
2793 		 * overhead to this rarely used _debug_ interface. Instead,
2794 		 * we use kmap() and accept a small overhead in this access
2795 		 * function.
2796 		 */
2797 		if (p) {
2798 			/*
2799 			 * we can expect USER0 is not used (see vread/vwrite's
2800 			 * function description)
2801 			 */
2802 			void *map = kmap_atomic(p);
2803 			memcpy(buf, map + offset, length);
2804 			kunmap_atomic(map);
2805 		} else
2806 			memset(buf, 0, length);
2807 
2808 		addr += length;
2809 		buf += length;
2810 		copied += length;
2811 		count -= length;
2812 	}
2813 	return copied;
2814 }
2815 
2816 static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2817 {
2818 	struct page *p;
2819 	int copied = 0;
2820 
2821 	while (count) {
2822 		unsigned long offset, length;
2823 
2824 		offset = offset_in_page(addr);
2825 		length = PAGE_SIZE - offset;
2826 		if (length > count)
2827 			length = count;
2828 		p = vmalloc_to_page(addr);
2829 		/*
2830 		 * To do safe access to this _mapped_ area, we need a
2831 		 * lock. But taking a lock here would add vmalloc()/vfree()
2832 		 * overhead to this rarely used _debug_ interface. Instead,
2833 		 * we use kmap() and accept a small overhead in this access
2834 		 * function.
2835 		 */
2836 		if (p) {
2837 			/*
2838 			 * we can expect USER0 is not used (see vread/vwrite's
2839 			 * function description)
2840 			 */
2841 			void *map = kmap_atomic(p);
2842 			memcpy(map + offset, buf, length);
2843 			kunmap_atomic(map);
2844 		}
2845 		addr += length;
2846 		buf += length;
2847 		copied += length;
2848 		count -= length;
2849 	}
2850 	return copied;
2851 }
2852 
2853 /**
2854  * vread() - read vmalloc area in a safe way.
2855  * @buf:     buffer for reading data
2856  * @addr:    vm address.
2857  * @count:   number of bytes to be read.
2858  *
2859  * This function checks that addr is a valid vmalloc'ed area, and
2860  * copies data from that area to a given buffer. If the given memory range
2861  * of [addr...addr+count) includes some valid address, data is copied to
2862  * proper area of @buf. If there are memory holes, they'll be zero-filled.
2863  * IOREMAP area is treated as memory hole and no copy is done.
2864  *
2865  * If [addr...addr+count) does not include any intersection with a live
2866  * vm_struct area, returns 0. @buf should be a kernel buffer.
2867  *
2868  * Note: In usual ops, vread() is never necessary because the caller
2869  * should know vmalloc() area is valid and can use memcpy().
2870  * This is for routines which have to access vmalloc area without
2871  * any other information, such as /dev/kmem.
2872  *
2873  * Return: number of bytes for which addr and buf should be increased
2874  * (same number as @count) or %0 if [addr...addr+count) doesn't
2875  * include any intersection with valid vmalloc area
2876  */
2877 long vread(char *buf, char *addr, unsigned long count)
2878 {
2879 	struct vmap_area *va;
2880 	struct vm_struct *vm;
2881 	char *vaddr, *buf_start = buf;
2882 	unsigned long buflen = count;
2883 	unsigned long n;
2884 
2885 	/* Don't allow overflow */
2886 	if ((unsigned long) addr + count < count)
2887 		count = -(unsigned long) addr;
2888 
2889 	spin_lock(&vmap_area_lock);
2890 	list_for_each_entry(va, &vmap_area_list, list) {
2891 		if (!count)
2892 			break;
2893 
2894 		if (!va->vm)
2895 			continue;
2896 
2897 		vm = va->vm;
2898 		vaddr = (char *) vm->addr;
2899 		if (addr >= vaddr + get_vm_area_size(vm))
2900 			continue;
2901 		while (addr < vaddr) {
2902 			if (count == 0)
2903 				goto finished;
2904 			*buf = '\0';
2905 			buf++;
2906 			addr++;
2907 			count--;
2908 		}
2909 		n = vaddr + get_vm_area_size(vm) - addr;
2910 		if (n > count)
2911 			n = count;
2912 		if (!(vm->flags & VM_IOREMAP))
2913 			aligned_vread(buf, addr, n);
2914 		else /* IOREMAP area is treated as memory hole */
2915 			memset(buf, 0, n);
2916 		buf += n;
2917 		addr += n;
2918 		count -= n;
2919 	}
2920 finished:
2921 	spin_unlock(&vmap_area_lock);
2922 
2923 	if (buf == buf_start)
2924 		return 0;
2925 	/* zero-fill memory holes */
2926 	if (buf != buf_start + buflen)
2927 		memset(buf, 0, buflen - (buf - buf_start));
2928 
2929 	return buflen;
2930 }
2931 
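/*
 * Illustrative sketch (hypothetical helper, not taken from the tree): a
 * /dev/kmem style reader pulling a possibly sparse vmalloc range into a
 * kernel buffer.  Holes and ioremap areas come back zero-filled.
 */
static long example_peek_vmalloc(char *dst, void *src, unsigned long len)
{
	/* Returns @len if the range intersects a live area, 0 otherwise. */
	return vread(dst, (char *)src, len);
}
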
2932 /**
2933  * vwrite() - write vmalloc area in a safe way.
2934  * @buf:      buffer for source data
2935  * @addr:     vm address.
2936  * @count:    number of bytes to be written.
2937  *
2938  * This function checks that addr is a valid vmalloc'ed area, and
2939  * copies data from a buffer to the given addr. If the specified range of
2940  * [addr...addr+count) includes some valid address, data is copied from
2941  * the proper area of @buf. If there are memory holes, nothing is copied to them.
2942  * IOREMAP area is treated as memory hole and no copy is done.
2943  *
2944  * If [addr...addr+count) does not include any intersection with a live
2945  * vm_struct area, returns 0. @buf should be a kernel buffer.
2946  *
2947  * Note: In usual ops, vwrite() is never necessary because the caller
2948  * should know vmalloc() area is valid and can use memcpy().
2949  * This is for routines which have to access vmalloc area without
2950  * any other information, such as /dev/kmem.
2951  *
2952  * Return: number of bytes for which addr and buf should be
2953  * increased (same number as @count) or %0 if [addr...addr+count)
2954  * doesn't include any intersection with valid vmalloc area
2955  */
2956 long vwrite(char *buf, char *addr, unsigned long count)
2957 {
2958 	struct vmap_area *va;
2959 	struct vm_struct *vm;
2960 	char *vaddr;
2961 	unsigned long n, buflen;
2962 	int copied = 0;
2963 
2964 	/* Don't allow overflow */
2965 	if ((unsigned long) addr + count < count)
2966 		count = -(unsigned long) addr;
2967 	buflen = count;
2968 
2969 	spin_lock(&vmap_area_lock);
2970 	list_for_each_entry(va, &vmap_area_list, list) {
2971 		if (!count)
2972 			break;
2973 
2974 		if (!va->vm)
2975 			continue;
2976 
2977 		vm = va->vm;
2978 		vaddr = (char *) vm->addr;
2979 		if (addr >= vaddr + get_vm_area_size(vm))
2980 			continue;
2981 		while (addr < vaddr) {
2982 			if (count == 0)
2983 				goto finished;
2984 			buf++;
2985 			addr++;
2986 			count--;
2987 		}
2988 		n = vaddr + get_vm_area_size(vm) - addr;
2989 		if (n > count)
2990 			n = count;
2991 		if (!(vm->flags & VM_IOREMAP)) {
2992 			aligned_vwrite(buf, addr, n);
2993 			copied++;
2994 		}
2995 		buf += n;
2996 		addr += n;
2997 		count -= n;
2998 	}
2999 finished:
3000 	spin_unlock(&vmap_area_lock);
3001 	if (!copied)
3002 		return 0;
3003 	return buflen;
3004 }
3005 
3006 /**
3007  * remap_vmalloc_range_partial - map vmalloc pages to userspace
3008  * @vma:		vma to cover
3009  * @uaddr:		target user address to start at
3010  * @kaddr:		virtual address of vmalloc kernel memory
3011  * @pgoff:		offset from @kaddr to start at
3012  * @size:		size of map area
3013  *
3014  * Returns:	0 for success, -Exxx on failure
3015  *
3016  * This function checks that @kaddr is a valid vmalloc'ed area,
3017  * and that it is big enough to cover the range starting at
3018  * @uaddr in @vma. Will return failure if that criteria isn't
3019  * met.
3020  *
3021  * Similar to remap_pfn_range() (see mm/memory.c)
3022  */
3023 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3024 				void *kaddr, unsigned long pgoff,
3025 				unsigned long size)
3026 {
3027 	struct vm_struct *area;
3028 	unsigned long off;
3029 	unsigned long end_index;
3030 
3031 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3032 		return -EINVAL;
3033 
3034 	size = PAGE_ALIGN(size);
3035 
3036 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3037 		return -EINVAL;
3038 
3039 	area = find_vm_area(kaddr);
3040 	if (!area)
3041 		return -EINVAL;
3042 
3043 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3044 		return -EINVAL;
3045 
3046 	if (check_add_overflow(size, off, &end_index) ||
3047 	    end_index > get_vm_area_size(area))
3048 		return -EINVAL;
3049 	kaddr += off;
3050 
3051 	do {
3052 		struct page *page = vmalloc_to_page(kaddr);
3053 		int ret;
3054 
3055 		ret = vm_insert_page(vma, uaddr, page);
3056 		if (ret)
3057 			return ret;
3058 
3059 		uaddr += PAGE_SIZE;
3060 		kaddr += PAGE_SIZE;
3061 		size -= PAGE_SIZE;
3062 	} while (size > 0);
3063 
3064 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3065 
3066 	return 0;
3067 }
3068 EXPORT_SYMBOL(remap_vmalloc_range_partial);
3069 
3070 /**
3071  * remap_vmalloc_range - map vmalloc pages to userspace
3072  * @vma:		vma to cover (map full range of vma)
3073  * @addr:		vmalloc memory
3074  * @pgoff:		number of pages into addr before first page to map
3075  *
3076  * Returns:	0 for success, -Exxx on failure
3077  *
3078  * This function checks that addr is a valid vmalloc'ed area, and
3079  * that it is big enough to cover the vma. Will return failure if
3080  * that criteria isn't met.
3081  *
3082  * Similar to remap_pfn_range() (see mm/memory.c)
3083  */
3084 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3085 						unsigned long pgoff)
3086 {
3087 	return remap_vmalloc_range_partial(vma, vma->vm_start,
3088 					   addr, pgoff,
3089 					   vma->vm_end - vma->vm_start);
3090 }
3091 EXPORT_SYMBOL(remap_vmalloc_range);
3092 
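/*
 * Illustrative sketch (hypothetical driver code, not taken from the tree;
 * assumes <linux/fs.h> for struct file): an mmap handler exposing a buffer
 * that was allocated with vmalloc_user() (and therefore carries VM_USERMAP)
 * to userspace.  file->private_data is assumed to hold the buffer pointer.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *kbuf = file->private_data;

	/* pgoff 0: map the buffer from its start across the whole vma. */
	return remap_vmalloc_range(vma, kbuf, 0);
}
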
3093 /*
3094  * Implement stubs for vmalloc_sync_[un]mappings() if the architecture chose
3095  * not to provide them.
3096  *
3097  * The purpose of this function is to make sure the vmalloc area
3098  * mappings are identical in all page-tables in the system.
3099  */
3100 void __weak vmalloc_sync_mappings(void)
3101 {
3102 }
3103 
3104 void __weak vmalloc_sync_unmappings(void)
3105 {
3106 }
3107 
3108 static int f(pte_t *pte, unsigned long addr, void *data)
3109 {
3110 	pte_t ***p = data;
3111 
3112 	if (p) {
3113 		*(*p) = pte;
3114 		(*p)++;
3115 	}
3116 	return 0;
3117 }
3118 
3119 /**
3120  * alloc_vm_area - allocate a range of kernel address space
3121  * @size:	   size of the area
3122  * @ptes:	   returns the PTEs for the address space
3123  *
3124  * Returns:	NULL on failure, vm_struct on success
3125  *
3126  * This function reserves a range of kernel address space, and
3127  * allocates pagetables to map that range.  No actual mappings
3128  * are created.
3129  *
3130  * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
3131  * allocated for the VM area are returned.
3132  */
3133 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
3134 {
3135 	struct vm_struct *area;
3136 
3137 	area = get_vm_area_caller(size, VM_IOREMAP,
3138 				__builtin_return_address(0));
3139 	if (area == NULL)
3140 		return NULL;
3141 
3142 	/*
3143 	 * This ensures that page tables are constructed for this region
3144 	 * of kernel virtual address space and mapped into init_mm.
3145 	 */
3146 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3147 				size, f, ptes ? &ptes : NULL)) {
3148 		free_vm_area(area);
3149 		return NULL;
3150 	}
3151 
3152 	return area;
3153 }
3154 EXPORT_SYMBOL_GPL(alloc_vm_area);
3155 
3156 void free_vm_area(struct vm_struct *area)
3157 {
3158 	struct vm_struct *ret;
3159 	ret = remove_vm_area(area->addr);
3160 	BUG_ON(ret != area);
3161 	kfree(area);
3162 }
3163 EXPORT_SYMBOL_GPL(free_vm_area);
3164 
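/*
 * Illustrative sketch (hypothetical backend, not taken from the tree):
 * reserving address space plus page tables with alloc_vm_area(), the way a
 * grant-table style backend might, and releasing it with free_vm_area().
 * @ptes must have room for one pte pointer per page of the range.
 */
static struct vm_struct *example_reserve_pte_area(unsigned int nr_pages,
						  pte_t **ptes)
{
	return alloc_vm_area((size_t)nr_pages << PAGE_SHIFT, ptes);
}

static void example_release_pte_area(struct vm_struct *area)
{
	free_vm_area(area);
}
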
3165 #ifdef CONFIG_SMP
3166 static struct vmap_area *node_to_va(struct rb_node *n)
3167 {
3168 	return rb_entry_safe(n, struct vmap_area, rb_node);
3169 }
3170 
3171 /**
3172  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3173  * @addr: target address
3174  *
3175  * Returns: vmap_area if it is found. If there is no such area
3176  *   the first highest (in reverse order) vmap_area is returned,
3177  *   i.e. va->va_start < addr && va->va_end < addr, or NULL
3178  *   if there are no areas before @addr.
3179  */
3180 static struct vmap_area *
3181 pvm_find_va_enclose_addr(unsigned long addr)
3182 {
3183 	struct vmap_area *va, *tmp;
3184 	struct rb_node *n;
3185 
3186 	n = free_vmap_area_root.rb_node;
3187 	va = NULL;
3188 
3189 	while (n) {
3190 		tmp = rb_entry(n, struct vmap_area, rb_node);
3191 		if (tmp->va_start <= addr) {
3192 			va = tmp;
3193 			if (tmp->va_end >= addr)
3194 				break;
3195 
3196 			n = n->rb_right;
3197 		} else {
3198 			n = n->rb_left;
3199 		}
3200 	}
3201 
3202 	return va;
3203 }
3204 
3205 /**
3206  * pvm_determine_end_from_reverse - find the highest aligned address
3207  * of a free block below VMALLOC_END
3208  * @va:
3209  *   in - the VA we start the search(reverse order);
3210  *   out - the VA with the highest aligned end address.
3211  *
3212  * Returns: determined end address within vmap_area
3213  */
3214 static unsigned long
3215 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3216 {
3217 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3218 	unsigned long addr;
3219 
3220 	if (likely(*va)) {
3221 		list_for_each_entry_from_reverse((*va),
3222 				&free_vmap_area_list, list) {
3223 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3224 			if ((*va)->va_start < addr)
3225 				return addr;
3226 		}
3227 	}
3228 
3229 	return 0;
3230 }
3231 
3232 /**
3233  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3234  * @offsets: array containing offset of each area
3235  * @sizes: array containing size of each area
3236  * @nr_vms: the number of areas to allocate
3237  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3238  *
3239  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3240  *	    vm_structs on success, %NULL on failure
3241  *
3242  * Percpu allocator wants to use congruent vm areas so that it can
3243  * maintain the offsets among percpu areas.  This function allocates
3244  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3245  * be scattered pretty far, with the distance between two areas easily going up
3246  * to gigabytes.  To avoid interacting with regular vmallocs, these
3247  * areas are allocated from top.
3248  *
3249  * Despite its complicated look, this allocator is rather simple. It
3250  * does everything top-down and scans free blocks from the end looking
3251  * for matching base. While scanning, if any of the areas do not fit the
3252  * base address is pulled down to fit the area. Scanning is repeated till
3253  * all the areas fit and then all necessary data structures are inserted
3254  * and the result is returned.
3255  */
3256 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3257 				     const size_t *sizes, int nr_vms,
3258 				     size_t align)
3259 {
3260 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3261 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3262 	struct vmap_area **vas, *va;
3263 	struct vm_struct **vms;
3264 	int area, area2, last_area, term_area;
3265 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
3266 	bool purged = false;
3267 	enum fit_type type;
3268 
3269 	/* verify parameters and allocate data structures */
3270 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3271 	for (last_area = 0, area = 0; area < nr_vms; area++) {
3272 		start = offsets[area];
3273 		end = start + sizes[area];
3274 
3275 		/* is everything aligned properly? */
3276 		BUG_ON(!IS_ALIGNED(offsets[area], align));
3277 		BUG_ON(!IS_ALIGNED(sizes[area], align));
3278 
3279 		/* detect the area with the highest address */
3280 		if (start > offsets[last_area])
3281 			last_area = area;
3282 
3283 		for (area2 = area + 1; area2 < nr_vms; area2++) {
3284 			unsigned long start2 = offsets[area2];
3285 			unsigned long end2 = start2 + sizes[area2];
3286 
3287 			BUG_ON(start2 < end && start < end2);
3288 		}
3289 	}
3290 	last_end = offsets[last_area] + sizes[last_area];
3291 
3292 	if (vmalloc_end - vmalloc_start < last_end) {
3293 		WARN_ON(true);
3294 		return NULL;
3295 	}
3296 
3297 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3298 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3299 	if (!vas || !vms)
3300 		goto err_free2;
3301 
3302 	for (area = 0; area < nr_vms; area++) {
3303 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3304 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3305 		if (!vas[area] || !vms[area])
3306 			goto err_free;
3307 	}
3308 retry:
3309 	spin_lock(&free_vmap_area_lock);
3310 
3311 	/* start scanning - we scan from the top, begin with the last area */
3312 	area = term_area = last_area;
3313 	start = offsets[area];
3314 	end = start + sizes[area];
3315 
3316 	va = pvm_find_va_enclose_addr(vmalloc_end);
3317 	base = pvm_determine_end_from_reverse(&va, align) - end;
3318 
3319 	while (true) {
3320 		/*
3321 		 * base might have underflowed, add last_end before
3322 		 * comparing.
3323 		 */
3324 		if (base + last_end < vmalloc_start + last_end)
3325 			goto overflow;
3326 
3327 		/*
3328 		 * Fitting base has not been found.
3329 		 */
3330 		if (va == NULL)
3331 			goto overflow;
3332 
3333 		/*
3334 		 * If required width exceeds current VA block, move
3335 		 * base downwards and then recheck.
3336 		 */
3337 		if (base + end > va->va_end) {
3338 			base = pvm_determine_end_from_reverse(&va, align) - end;
3339 			term_area = area;
3340 			continue;
3341 		}
3342 
3343 		/*
3344 		 * If this VA does not fit, move base downwards and recheck.
3345 		 */
3346 		if (base + start < va->va_start) {
3347 			va = node_to_va(rb_prev(&va->rb_node));
3348 			base = pvm_determine_end_from_reverse(&va, align) - end;
3349 			term_area = area;
3350 			continue;
3351 		}
3352 
3353 		/*
3354 		 * This area fits, move on to the previous one.  If
3355 		 * the previous one is the terminal one, we're done.
3356 		 */
3357 		area = (area + nr_vms - 1) % nr_vms;
3358 		if (area == term_area)
3359 			break;
3360 
3361 		start = offsets[area];
3362 		end = start + sizes[area];
3363 		va = pvm_find_va_enclose_addr(base + end);
3364 	}
3365 
3366 	/* we've found a fitting base, insert all va's */
3367 	for (area = 0; area < nr_vms; area++) {
3368 		int ret;
3369 
3370 		start = base + offsets[area];
3371 		size = sizes[area];
3372 
3373 		va = pvm_find_va_enclose_addr(start);
3374 		if (WARN_ON_ONCE(va == NULL))
3375 			/* It is a BUG(), but trigger recovery instead. */
3376 			goto recovery;
3377 
3378 		type = classify_va_fit_type(va, start, size);
3379 		if (WARN_ON_ONCE(type == NOTHING_FIT))
3380 			/* It is a BUG(), but trigger recovery instead. */
3381 			goto recovery;
3382 
3383 		ret = adjust_va_to_fit_type(va, start, size, type);
3384 		if (unlikely(ret))
3385 			goto recovery;
3386 
3387 		/* Allocated area. */
3388 		va = vas[area];
3389 		va->va_start = start;
3390 		va->va_end = start + size;
3391 	}
3392 
3393 	spin_unlock(&free_vmap_area_lock);
3394 
3395 	/* populate the kasan shadow space */
3396 	for (area = 0; area < nr_vms; area++) {
3397 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3398 			goto err_free_shadow;
3399 
3400 		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3401 				       sizes[area]);
3402 	}
3403 
3404 	/* insert all vm's */
3405 	spin_lock(&vmap_area_lock);
3406 	for (area = 0; area < nr_vms; area++) {
3407 		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3408 
3409 		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3410 				 pcpu_get_vm_areas);
3411 	}
3412 	spin_unlock(&vmap_area_lock);
3413 
3414 	kfree(vas);
3415 	return vms;
3416 
3417 recovery:
3418 	/*
3419 	 * Remove previously allocated areas. There is no
3420 	 * need to remove these areas from the busy tree,
3421 	 * because they are inserted only on the final step
3422 	 * and only when pcpu_get_vm_areas() succeeds.
3423 	 */
3424 	while (area--) {
3425 		orig_start = vas[area]->va_start;
3426 		orig_end = vas[area]->va_end;
3427 		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3428 					    &free_vmap_area_list);
3429 		kasan_release_vmalloc(orig_start, orig_end,
3430 				      va->va_start, va->va_end);
3431 		vas[area] = NULL;
3432 	}
3433 
3434 overflow:
3435 	spin_unlock(&free_vmap_area_lock);
3436 	if (!purged) {
3437 		purge_vmap_area_lazy();
3438 		purged = true;
3439 
3440 		/* Before "retry", check if we recover. */
3441 		for (area = 0; area < nr_vms; area++) {
3442 			if (vas[area])
3443 				continue;
3444 
3445 			vas[area] = kmem_cache_zalloc(
3446 				vmap_area_cachep, GFP_KERNEL);
3447 			if (!vas[area])
3448 				goto err_free;
3449 		}
3450 
3451 		goto retry;
3452 	}
3453 
3454 err_free:
3455 	for (area = 0; area < nr_vms; area++) {
3456 		if (vas[area])
3457 			kmem_cache_free(vmap_area_cachep, vas[area]);
3458 
3459 		kfree(vms[area]);
3460 	}
3461 err_free2:
3462 	kfree(vas);
3463 	kfree(vms);
3464 	return NULL;
3465 
3466 err_free_shadow:
3467 	spin_lock(&free_vmap_area_lock);
3468 	/*
3469 	 * We release all the vmalloc shadows, even the ones for regions that
3470 	 * hadn't been successfully added. This relies on kasan_release_vmalloc
3471 	 * being able to tolerate this case.
3472 	 */
3473 	for (area = 0; area < nr_vms; area++) {
3474 		orig_start = vas[area]->va_start;
3475 		orig_end = vas[area]->va_end;
3476 		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
3477 					    &free_vmap_area_list);
3478 		kasan_release_vmalloc(orig_start, orig_end,
3479 				      va->va_start, va->va_end);
3480 		vas[area] = NULL;
3481 		kfree(vms[area]);
3482 	}
3483 	spin_unlock(&free_vmap_area_lock);
3484 	kfree(vas);
3485 	kfree(vms);
3486 	return NULL;
3487 }
3488 
3489 /**
3490  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3491  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3492  * @nr_vms: the number of allocated areas
3493  *
3494  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3495  */
3496 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3497 {
3498 	int i;
3499 
3500 	for (i = 0; i < nr_vms; i++)
3501 		free_vm_area(vms[i]);
3502 	kfree(vms);
3503 }
3504 #endif	/* CONFIG_SMP */
3505 
3506 #ifdef CONFIG_PROC_FS
3507 static void *s_start(struct seq_file *m, loff_t *pos)
3508 	__acquires(&vmap_purge_lock)
3509 	__acquires(&vmap_area_lock)
3510 {
3511 	mutex_lock(&vmap_purge_lock);
3512 	spin_lock(&vmap_area_lock);
3513 
3514 	return seq_list_start(&vmap_area_list, *pos);
3515 }
3516 
3517 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3518 {
3519 	return seq_list_next(p, &vmap_area_list, pos);
3520 }
3521 
3522 static void s_stop(struct seq_file *m, void *p)
3523 	__releases(&vmap_purge_lock)
3524 	__releases(&vmap_area_lock)
3525 {
3526 	mutex_unlock(&vmap_purge_lock);
3527 	spin_unlock(&vmap_area_lock);
3528 }
3529 
3530 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3531 {
3532 	if (IS_ENABLED(CONFIG_NUMA)) {
3533 		unsigned int nr, *counters = m->private;
3534 
3535 		if (!counters)
3536 			return;
3537 
3538 		if (v->flags & VM_UNINITIALIZED)
3539 			return;
3540 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3541 		smp_rmb();
3542 
3543 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3544 
3545 		for (nr = 0; nr < v->nr_pages; nr++)
3546 			counters[page_to_nid(v->pages[nr])]++;
3547 
3548 		for_each_node_state(nr, N_HIGH_MEMORY)
3549 			if (counters[nr])
3550 				seq_printf(m, " N%u=%u", nr, counters[nr]);
3551 	}
3552 }
3553 
3554 static void show_purge_info(struct seq_file *m)
3555 {
3556 	struct llist_node *head;
3557 	struct vmap_area *va;
3558 
3559 	head = READ_ONCE(vmap_purge_list.first);
3560 	if (head == NULL)
3561 		return;
3562 
3563 	llist_for_each_entry(va, head, purge_list) {
3564 		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3565 			(void *)va->va_start, (void *)va->va_end,
3566 			va->va_end - va->va_start);
3567 	}
3568 }
3569 
3570 static int s_show(struct seq_file *m, void *p)
3571 {
3572 	struct vmap_area *va;
3573 	struct vm_struct *v;
3574 
3575 	va = list_entry(p, struct vmap_area, list);
3576 
3577 	/*
3578 	 * s_show can encounter a race with remove_vm_area(); !vm means the
3579 	 * vmap area is being torn down or is a vm_map_ram allocation.
3580 	 */
3581 	if (!va->vm) {
3582 		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3583 			(void *)va->va_start, (void *)va->va_end,
3584 			va->va_end - va->va_start);
3585 
3586 		return 0;
3587 	}
3588 
3589 	v = va->vm;
3590 
3591 	seq_printf(m, "0x%pK-0x%pK %7ld",
3592 		v->addr, v->addr + v->size, v->size);
3593 
3594 	if (v->caller)
3595 		seq_printf(m, " %pS", v->caller);
3596 
3597 	if (v->nr_pages)
3598 		seq_printf(m, " pages=%d", v->nr_pages);
3599 
3600 	if (v->phys_addr)
3601 		seq_printf(m, " phys=%pa", &v->phys_addr);
3602 
3603 	if (v->flags & VM_IOREMAP)
3604 		seq_puts(m, " ioremap");
3605 
3606 	if (v->flags & VM_ALLOC)
3607 		seq_puts(m, " vmalloc");
3608 
3609 	if (v->flags & VM_MAP)
3610 		seq_puts(m, " vmap");
3611 
3612 	if (v->flags & VM_USERMAP)
3613 		seq_puts(m, " user");
3614 
3615 	if (v->flags & VM_DMA_COHERENT)
3616 		seq_puts(m, " dma-coherent");
3617 
3618 	if (is_vmalloc_addr(v->pages))
3619 		seq_puts(m, " vpages");
3620 
3621 	show_numa_info(m, v);
3622 	seq_putc(m, '\n');
3623 
3624 	/*
3625 	 * As a final step, dump "unpurged" areas. Note
3626 	 * that the entire "/proc/vmallocinfo" output will not
3627 	 * be address sorted, because the purge list is not
3628 	 * sorted.
3629 	 */
3630 	if (list_is_last(&va->list, &vmap_area_list))
3631 		show_purge_info(m);
3632 
3633 	return 0;
3634 }
3635 
3636 static const struct seq_operations vmalloc_op = {
3637 	.start = s_start,
3638 	.next = s_next,
3639 	.stop = s_stop,
3640 	.show = s_show,
3641 };
3642 
3643 static int __init proc_vmalloc_init(void)
3644 {
3645 	if (IS_ENABLED(CONFIG_NUMA))
3646 		proc_create_seq_private("vmallocinfo", 0400, NULL,
3647 				&vmalloc_op,
3648 				nr_node_ids * sizeof(unsigned int), NULL);
3649 	else
3650 		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3651 	return 0;
3652 }
3653 module_init(proc_vmalloc_init);
3654 
3655 #endif
3656