xref: /openbmc/linux/mm/vmalloc.c (revision b664e06d)
1 /*
2  *  linux/mm/vmalloc.c
3  *
4  *  Copyright (C) 1993  Linus Torvalds
5  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
6  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
7  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
8  *  Numa awareness, Christoph Lameter, SGI, June 2005
9  */
10 
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/sched/signal.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/interrupt.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/set_memory.h>
22 #include <linux/debugobjects.h>
23 #include <linux/kallsyms.h>
24 #include <linux/list.h>
25 #include <linux/notifier.h>
26 #include <linux/rbtree.h>
27 #include <linux/radix-tree.h>
28 #include <linux/rcupdate.h>
29 #include <linux/pfn.h>
30 #include <linux/kmemleak.h>
31 #include <linux/atomic.h>
32 #include <linux/compiler.h>
33 #include <linux/llist.h>
34 #include <linux/bitops.h>
35 #include <linux/rbtree_augmented.h>
36 
37 #include <linux/uaccess.h>
38 #include <asm/tlbflush.h>
39 #include <asm/shmparam.h>
40 
41 #include "internal.h"
42 
43 struct vfree_deferred {
44 	struct llist_head list;
45 	struct work_struct wq;
46 };
47 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
48 
49 static void __vunmap(const void *, int);
50 
51 static void free_work(struct work_struct *w)
52 {
53 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
54 	struct llist_node *t, *llnode;
55 
56 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
57 		__vunmap((void *)llnode, 1);
58 }
59 
60 /*** Page table manipulation functions ***/
61 
62 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
63 {
64 	pte_t *pte;
65 
66 	pte = pte_offset_kernel(pmd, addr);
67 	do {
68 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
69 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70 	} while (pte++, addr += PAGE_SIZE, addr != end);
71 }
72 
73 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
74 {
75 	pmd_t *pmd;
76 	unsigned long next;
77 
78 	pmd = pmd_offset(pud, addr);
79 	do {
80 		next = pmd_addr_end(addr, end);
81 		if (pmd_clear_huge(pmd))
82 			continue;
83 		if (pmd_none_or_clear_bad(pmd))
84 			continue;
85 		vunmap_pte_range(pmd, addr, next);
86 	} while (pmd++, addr = next, addr != end);
87 }
88 
89 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
90 {
91 	pud_t *pud;
92 	unsigned long next;
93 
94 	pud = pud_offset(p4d, addr);
95 	do {
96 		next = pud_addr_end(addr, end);
97 		if (pud_clear_huge(pud))
98 			continue;
99 		if (pud_none_or_clear_bad(pud))
100 			continue;
101 		vunmap_pmd_range(pud, addr, next);
102 	} while (pud++, addr = next, addr != end);
103 }
104 
105 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
106 {
107 	p4d_t *p4d;
108 	unsigned long next;
109 
110 	p4d = p4d_offset(pgd, addr);
111 	do {
112 		next = p4d_addr_end(addr, end);
113 		if (p4d_clear_huge(p4d))
114 			continue;
115 		if (p4d_none_or_clear_bad(p4d))
116 			continue;
117 		vunmap_pud_range(p4d, addr, next);
118 	} while (p4d++, addr = next, addr != end);
119 }
120 
121 static void vunmap_page_range(unsigned long addr, unsigned long end)
122 {
123 	pgd_t *pgd;
124 	unsigned long next;
125 
126 	BUG_ON(addr >= end);
127 	pgd = pgd_offset_k(addr);
128 	do {
129 		next = pgd_addr_end(addr, end);
130 		if (pgd_none_or_clear_bad(pgd))
131 			continue;
132 		vunmap_p4d_range(pgd, addr, next);
133 	} while (pgd++, addr = next, addr != end);
134 }
135 
136 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
137 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
138 {
139 	pte_t *pte;
140 
141 	/*
142 	 * nr is a running index into the array which helps higher level
143 	 * callers keep track of where we're up to.
144 	 */
145 
146 	pte = pte_alloc_kernel(pmd, addr);
147 	if (!pte)
148 		return -ENOMEM;
149 	do {
150 		struct page *page = pages[*nr];
151 
152 		if (WARN_ON(!pte_none(*pte)))
153 			return -EBUSY;
154 		if (WARN_ON(!page))
155 			return -ENOMEM;
156 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
157 		(*nr)++;
158 	} while (pte++, addr += PAGE_SIZE, addr != end);
159 	return 0;
160 }
161 
162 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
163 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
164 {
165 	pmd_t *pmd;
166 	unsigned long next;
167 
168 	pmd = pmd_alloc(&init_mm, pud, addr);
169 	if (!pmd)
170 		return -ENOMEM;
171 	do {
172 		next = pmd_addr_end(addr, end);
173 		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
174 			return -ENOMEM;
175 	} while (pmd++, addr = next, addr != end);
176 	return 0;
177 }
178 
179 static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
180 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
181 {
182 	pud_t *pud;
183 	unsigned long next;
184 
185 	pud = pud_alloc(&init_mm, p4d, addr);
186 	if (!pud)
187 		return -ENOMEM;
188 	do {
189 		next = pud_addr_end(addr, end);
190 		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
191 			return -ENOMEM;
192 	} while (pud++, addr = next, addr != end);
193 	return 0;
194 }
195 
196 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
197 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
198 {
199 	p4d_t *p4d;
200 	unsigned long next;
201 
202 	p4d = p4d_alloc(&init_mm, pgd, addr);
203 	if (!p4d)
204 		return -ENOMEM;
205 	do {
206 		next = p4d_addr_end(addr, end);
207 		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
208 			return -ENOMEM;
209 	} while (p4d++, addr = next, addr != end);
210 	return 0;
211 }
212 
213 /*
214  * Set up page tables in the kva range [addr, end). The ptes shall have prot
215  * "prot", and will have pfns corresponding to the "pages" array.
216  *
217  * I.e. the pte at addr + N*PAGE_SIZE shall point to the pfn of pages[N].
218  */
219 static int vmap_page_range_noflush(unsigned long start, unsigned long end,
220 				   pgprot_t prot, struct page **pages)
221 {
222 	pgd_t *pgd;
223 	unsigned long next;
224 	unsigned long addr = start;
225 	int err = 0;
226 	int nr = 0;
227 
228 	BUG_ON(addr >= end);
229 	pgd = pgd_offset_k(addr);
230 	do {
231 		next = pgd_addr_end(addr, end);
232 		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
233 		if (err)
234 			return err;
235 	} while (pgd++, addr = next, addr != end);
236 
237 	return nr;
238 }
239 
240 static int vmap_page_range(unsigned long start, unsigned long end,
241 			   pgprot_t prot, struct page **pages)
242 {
243 	int ret;
244 
245 	ret = vmap_page_range_noflush(start, end, prot, pages);
246 	flush_cache_vmap(start, end);
247 	return ret;
248 }
249 
250 int is_vmalloc_or_module_addr(const void *x)
251 {
252 	/*
253 	 * ARM, x86-64 and sparc64 put modules in a special place,
254 	 * and fall back on vmalloc() if that fails. Others
255 	 * just put it in the vmalloc space.
256 	 */
257 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
258 	unsigned long addr = (unsigned long)x;
259 	if (addr >= MODULES_VADDR && addr < MODULES_END)
260 		return 1;
261 #endif
262 	return is_vmalloc_addr(x);
263 }
264 
265 /*
266  * Walk a vmap address to the struct page it maps.
267  */
268 struct page *vmalloc_to_page(const void *vmalloc_addr)
269 {
270 	unsigned long addr = (unsigned long) vmalloc_addr;
271 	struct page *page = NULL;
272 	pgd_t *pgd = pgd_offset_k(addr);
273 	p4d_t *p4d;
274 	pud_t *pud;
275 	pmd_t *pmd;
276 	pte_t *ptep, pte;
277 
278 	/*
279 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
280 	 * architectures that do not vmalloc module space
281 	 */
282 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
283 
284 	if (pgd_none(*pgd))
285 		return NULL;
286 	p4d = p4d_offset(pgd, addr);
287 	if (p4d_none(*p4d))
288 		return NULL;
289 	pud = pud_offset(p4d, addr);
290 
291 	/*
292 	 * Don't dereference bad PUD or PMD (below) entries. This will also
293 	 * identify huge mappings, which we may encounter on architectures
294 	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
295 	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
296 	 * not [unambiguously] associated with a struct page, so there is
297 	 * no correct value to return for them.
298 	 */
299 	WARN_ON_ONCE(pud_bad(*pud));
300 	if (pud_none(*pud) || pud_bad(*pud))
301 		return NULL;
302 	pmd = pmd_offset(pud, addr);
303 	WARN_ON_ONCE(pmd_bad(*pmd));
304 	if (pmd_none(*pmd) || pmd_bad(*pmd))
305 		return NULL;
306 
307 	ptep = pte_offset_map(pmd, addr);
308 	pte = *ptep;
309 	if (pte_present(pte))
310 		page = pte_page(pte);
311 	pte_unmap(ptep);
312 	return page;
313 }
314 EXPORT_SYMBOL(vmalloc_to_page);
315 
316 /*
317  * Map a vmalloc()-space virtual address to the physical page frame number.
318  */
319 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
320 {
321 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
322 }
323 EXPORT_SYMBOL(vmalloc_to_pfn);
324 
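/*
 * Illustrative example (not part of this file): a caller that vmalloc()s a
 * buffer and needs the backing struct pages, e.g. to build a scatterlist,
 * can walk the buffer page by page with vmalloc_to_page():
 *
 *	void *buf = vmalloc(size);
 *	unsigned long off;
 *
 *	for (off = 0; buf && off < size; off += PAGE_SIZE) {
 *		struct page *page = vmalloc_to_page(buf + off);
 *	}
 *
 * Each returned page can then be handed to a scatterlist, DMA setup, etc.
 */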
325 
326 /*** Global kva allocator ***/
327 
328 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
329 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
330 
331 #define VM_LAZY_FREE	0x02
332 #define VM_VM_AREA	0x04
333 
334 static DEFINE_SPINLOCK(vmap_area_lock);
335 /* Export for kexec only */
336 LIST_HEAD(vmap_area_list);
337 static LLIST_HEAD(vmap_purge_list);
338 static struct rb_root vmap_area_root = RB_ROOT;
339 static bool vmap_initialized __read_mostly;
340 
341 /*
342  * This kmem_cache is used for vmap_area objects. Instead of
343  * allocating from slab we reuse an object from this cache to
344  * make things faster, especially in the "no edge" splitting
345  * of a free block.
346  */
347 static struct kmem_cache *vmap_area_cachep;
348 
349 /*
350  * This linked list is used together with free_vmap_area_root.
351  * It gives O(1) access to prev/next to perform fast coalescing.
352  */
353 static LIST_HEAD(free_vmap_area_list);
354 
355 /*
356  * This augmented red-black tree represents the free vmap space.
357  * All vmap_area objects in this tree are sorted by va->va_start
358  * address. It is used for allocation and merging when a vmap
359  * object is released.
360  *
361  * Each vmap_area node caches the maximum size of a free block
362  * available in its sub-tree, left or right. Therefore it is
363  * possible to find the lowest-address free area that fits a request.
364  */
365 static struct rb_root free_vmap_area_root = RB_ROOT;
366 
367 static __always_inline unsigned long
368 va_size(struct vmap_area *va)
369 {
370 	return (va->va_end - va->va_start);
371 }
372 
373 static __always_inline unsigned long
374 get_subtree_max_size(struct rb_node *node)
375 {
376 	struct vmap_area *va;
377 
378 	va = rb_entry_safe(node, struct vmap_area, rb_node);
379 	return va ? va->subtree_max_size : 0;
380 }
381 
382 /*
383  * Gets called when a node is removed from the tree or the tree is rotated.
384  */
385 static __always_inline unsigned long
386 compute_subtree_max_size(struct vmap_area *va)
387 {
388 	return max3(va_size(va),
389 		get_subtree_max_size(va->rb_node.rb_left),
390 		get_subtree_max_size(va->rb_node.rb_right));
391 }
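
/*
 * For example (illustrative sizes only): if a node's own free area is 2
 * units long and its left and right children report subtree_max_size of 8
 * and 4 units, compute_subtree_max_size() returns max3(2, 8, 4) == 8,
 * i.e. the largest free block available anywhere in this node's sub-tree.
 */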
392 
393 RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb,
394 	struct vmap_area, rb_node, unsigned long, subtree_max_size,
395 	compute_subtree_max_size)
396 
397 static void purge_vmap_area_lazy(void);
398 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
399 static unsigned long lazy_max_pages(void);
400 
401 static struct vmap_area *__find_vmap_area(unsigned long addr)
402 {
403 	struct rb_node *n = vmap_area_root.rb_node;
404 
405 	while (n) {
406 		struct vmap_area *va;
407 
408 		va = rb_entry(n, struct vmap_area, rb_node);
409 		if (addr < va->va_start)
410 			n = n->rb_left;
411 		else if (addr >= va->va_end)
412 			n = n->rb_right;
413 		else
414 			return va;
415 	}
416 
417 	return NULL;
418 }
419 
420 /*
421  * This function returns the address of the parent's left or right
422  * link where the new node should be attached, for further processing.
423  */
424 static __always_inline struct rb_node **
425 find_va_links(struct vmap_area *va,
426 	struct rb_root *root, struct rb_node *from,
427 	struct rb_node **parent)
428 {
429 	struct vmap_area *tmp_va;
430 	struct rb_node **link;
431 
432 	if (root) {
433 		link = &root->rb_node;
434 		if (unlikely(!*link)) {
435 			*parent = NULL;
436 			return link;
437 		}
438 	} else {
439 		link = &from;
440 	}
441 
442 	/*
443 	 * Go to the bottom of the tree. When we hit the last point
444 	 * we end up with the parent rb_node and the correct direction
445 	 * (the link) where the new va->rb_node will be attached.
446 	 */
447 	do {
448 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
449 
450 		/*
451 		 * During the traversal we also do some sanity checks.
452 		 * Trigger the BUG() if the new area overlaps an existing
453 		 * one, either partially or fully.
454 		 */
455 		if (va->va_start < tmp_va->va_end &&
456 				va->va_end <= tmp_va->va_start)
457 			link = &(*link)->rb_left;
458 		else if (va->va_end > tmp_va->va_start &&
459 				va->va_start >= tmp_va->va_end)
460 			link = &(*link)->rb_right;
461 		else
462 			BUG();
463 	} while (*link);
464 
465 	*parent = &tmp_va->rb_node;
466 	return link;
467 }
468 
469 static __always_inline struct list_head *
470 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
471 {
472 	struct list_head *list;
473 
474 	if (unlikely(!parent))
475 		/*
476 		 * The red-black tree where we try to find VA neighbors
477 		 * before merging or inserting is empty, i.e. it means
478 		 * there is no free vmap space. Normally it does not
479 		 * happen but we handle this case anyway.
480 		 */
481 		return NULL;
482 
483 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
484 	return (&parent->rb_right == link ? list->next : list);
485 }
486 
487 static __always_inline void
488 link_va(struct vmap_area *va, struct rb_root *root,
489 	struct rb_node *parent, struct rb_node **link, struct list_head *head)
490 {
491 	/*
492 	 * VA is not yet in the list, but we can
493 	 * identify its future previous list_head node.
494 	 */
495 	if (likely(parent)) {
496 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
497 		if (&parent->rb_right != link)
498 			head = head->prev;
499 	}
500 
501 	/* Insert to the rb-tree */
502 	rb_link_node(&va->rb_node, parent, link);
503 	if (root == &free_vmap_area_root) {
504 		/*
505 		 * Some explanation here. Just perform a simple insertion
506 		 * into the tree. We do not set va->subtree_max_size to
507 		 * its current size before calling rb_insert_augmented().
508 		 * This is because we populate the tree from the bottom
509 		 * up to the parent levels only when the node _is_ in the tree.
510 		 *
511 		 * Therefore we set subtree_max_size to zero after insertion,
512 		 * so that augment_tree_propagate_from() can put everything
513 		 * into the correct order later on.
514 		 */
515 		rb_insert_augmented(&va->rb_node,
516 			root, &free_vmap_area_rb_augment_cb);
517 		va->subtree_max_size = 0;
518 	} else {
519 		rb_insert_color(&va->rb_node, root);
520 	}
521 
522 	/* Insert into the address-sorted list */
523 	list_add(&va->list, head);
524 }
525 
526 static __always_inline void
527 unlink_va(struct vmap_area *va, struct rb_root *root)
528 {
529 	/*
530 	 * During merging a VA node can be empty, therefore
531 	 * not linked with either the tree or the list. Just check it.
532 	 */
533 	if (!RB_EMPTY_NODE(&va->rb_node)) {
534 		if (root == &free_vmap_area_root)
535 			rb_erase_augmented(&va->rb_node,
536 				root, &free_vmap_area_rb_augment_cb);
537 		else
538 			rb_erase(&va->rb_node, root);
539 
540 		list_del(&va->list);
541 		RB_CLEAR_NODE(&va->rb_node);
542 	}
543 }
544 
545 #if DEBUG_AUGMENT_PROPAGATE_CHECK
546 static void
547 augment_tree_propagate_check(struct rb_node *n)
548 {
549 	struct vmap_area *va;
550 	struct rb_node *node;
551 	unsigned long size;
552 	bool found = false;
553 
554 	if (n == NULL)
555 		return;
556 
557 	va = rb_entry(n, struct vmap_area, rb_node);
558 	size = va->subtree_max_size;
559 	node = n;
560 
561 	while (node) {
562 		va = rb_entry(node, struct vmap_area, rb_node);
563 
564 		if (get_subtree_max_size(node->rb_left) == size) {
565 			node = node->rb_left;
566 		} else {
567 			if (va_size(va) == size) {
568 				found = true;
569 				break;
570 			}
571 
572 			node = node->rb_right;
573 		}
574 	}
575 
576 	if (!found) {
577 		va = rb_entry(n, struct vmap_area, rb_node);
578 		pr_emerg("tree is corrupted: %lu, %lu\n",
579 			va_size(va), va->subtree_max_size);
580 	}
581 
582 	augment_tree_propagate_check(n->rb_left);
583 	augment_tree_propagate_check(n->rb_right);
584 }
585 #endif
586 
587 /*
588  * This function populates subtree_max_size from the bottom up to the
589  * upper levels, starting from the VA point. The propagation must be
590  * done when the VA size is modified by changing its va_start/va_end,
591  * or when a new VA is inserted into the tree.
592  *
593  * It means that augment_tree_propagate_from() must be called:
594  * - after a VA has been inserted into the tree (free path);
595  * - after a VA has been shrunk (allocation path);
596  * - after a VA has been enlarged (merging path).
597  *
598  * Please note that it does not mean that the upper parent nodes
599  * and their subtree_max_size are recalculated all the way up
600  * to the root node.
601  *
602  *       4--8
603  *        /\
604  *       /  \
605  *      /    \
606  *    2--2  8--8
607  *
608  * For example, if we shrink the node 4 to 2, then no subtree_max_size
609  * modification is required. If we shrink the node 2 to 1, only its
610  * own subtree_max_size is updated and set to 1. If we shrink the node
611  * 8 to 6, then its subtree_max_size is set to 6 and the parent node
612  * becomes 4--6.
613  */
614 static __always_inline void
615 augment_tree_propagate_from(struct vmap_area *va)
616 {
617 	struct rb_node *node = &va->rb_node;
618 	unsigned long new_va_sub_max_size;
619 
620 	while (node) {
621 		va = rb_entry(node, struct vmap_area, rb_node);
622 		new_va_sub_max_size = compute_subtree_max_size(va);
623 
624 		/*
625 		 * If the newly calculated maximum available size of the
626 		 * subtree is equal to the current one, then it means that
627 		 * the tree is already propagated correctly, so we can stop
628 		 * at this point to save cycles.
629 		 */
630 		if (va->subtree_max_size == new_va_sub_max_size)
631 			break;
632 
633 		va->subtree_max_size = new_va_sub_max_size;
634 		node = rb_parent(&va->rb_node);
635 	}
636 
637 #if DEBUG_AUGMENT_PROPAGATE_CHECK
638 	augment_tree_propagate_check(free_vmap_area_root.rb_node);
639 #endif
640 }
641 
642 static void
643 insert_vmap_area(struct vmap_area *va,
644 	struct rb_root *root, struct list_head *head)
645 {
646 	struct rb_node **link;
647 	struct rb_node *parent;
648 
649 	link = find_va_links(va, root, NULL, &parent);
650 	link_va(va, root, parent, link, head);
651 }
652 
653 static void
654 insert_vmap_area_augment(struct vmap_area *va,
655 	struct rb_node *from, struct rb_root *root,
656 	struct list_head *head)
657 {
658 	struct rb_node **link;
659 	struct rb_node *parent;
660 
661 	if (from)
662 		link = find_va_links(va, NULL, from, &parent);
663 	else
664 		link = find_va_links(va, root, NULL, &parent);
665 
666 	link_va(va, root, parent, link, head);
667 	augment_tree_propagate_from(va);
668 }
669 
670 /*
671  * Merge a de-allocated chunk of VA memory with the previous
672  * and next free blocks. If coalescing is not possible, a new
673  * free area is inserted instead. If the VA has been merged,
674  * its vmap_area object is freed.
675  */
676 static __always_inline void
677 merge_or_add_vmap_area(struct vmap_area *va,
678 	struct rb_root *root, struct list_head *head)
679 {
680 	struct vmap_area *sibling;
681 	struct list_head *next;
682 	struct rb_node **link;
683 	struct rb_node *parent;
684 	bool merged = false;
685 
686 	/*
687 	 * Find a place in the tree where VA potentially will be
688 	 * inserted, unless it is merged with its sibling/siblings.
689 	 */
690 	link = find_va_links(va, root, NULL, &parent);
691 
692 	/*
693 	 * Get next node of VA to check if merging can be done.
694 	 */
695 	next = get_va_next_sibling(parent, link);
696 	if (unlikely(next == NULL))
697 		goto insert;
698 
699 	/*
700 	 * start            end
701 	 * |                |
702 	 * |<------VA------>|<-----Next----->|
703 	 *                  |                |
704 	 *                  start            end
705 	 */
706 	if (next != head) {
707 		sibling = list_entry(next, struct vmap_area, list);
708 		if (sibling->va_start == va->va_end) {
709 			sibling->va_start = va->va_start;
710 
711 			/* Check and update the tree if needed. */
712 			augment_tree_propagate_from(sibling);
713 
714 			/* Remove this VA, it has been merged. */
715 			unlink_va(va, root);
716 
717 			/* Free vmap_area object. */
718 			kmem_cache_free(vmap_area_cachep, va);
719 
720 			/* Point to the new merged area. */
721 			va = sibling;
722 			merged = true;
723 		}
724 	}
725 
726 	/*
727 	 * start            end
728 	 * |                |
729 	 * |<-----Prev----->|<------VA------>|
730 	 *                  |                |
731 	 *                  start            end
732 	 */
733 	if (next->prev != head) {
734 		sibling = list_entry(next->prev, struct vmap_area, list);
735 		if (sibling->va_end == va->va_start) {
736 			sibling->va_end = va->va_end;
737 
738 			/* Check and update the tree if needed. */
739 			augment_tree_propagate_from(sibling);
740 
741 			/* Remove this VA, it has been merged. */
742 			unlink_va(va, root);
743 
744 			/* Free vmap_area object. */
745 			kmem_cache_free(vmap_area_cachep, va);
746 
747 			return;
748 		}
749 	}
750 
751 insert:
752 	if (!merged) {
753 		link_va(va, root, parent, link, head);
754 		augment_tree_propagate_from(va);
755 	}
756 }
757 
758 static __always_inline bool
759 is_within_this_va(struct vmap_area *va, unsigned long size,
760 	unsigned long align, unsigned long vstart)
761 {
762 	unsigned long nva_start_addr;
763 
764 	if (va->va_start > vstart)
765 		nva_start_addr = ALIGN(va->va_start, align);
766 	else
767 		nva_start_addr = ALIGN(vstart, align);
768 
769 	/* The sum can overflow due to a big size or alignment. */
770 	if (nva_start_addr + size < nva_start_addr ||
771 			nva_start_addr < vstart)
772 		return false;
773 
774 	return (nva_start_addr + size <= va->va_end);
775 }
776 
777 /*
778  * Find the first free block (lowest start address) in the tree
779  * that can satisfy the request described by the passed
780  * parameters.
781  */
782 static __always_inline struct vmap_area *
783 find_vmap_lowest_match(unsigned long size,
784 	unsigned long align, unsigned long vstart)
785 {
786 	struct vmap_area *va;
787 	struct rb_node *node;
788 	unsigned long length;
789 
790 	/* Start from the root. */
791 	node = free_vmap_area_root.rb_node;
792 
793 	/* Adjust the search size for alignment overhead. */
794 	length = size + align - 1;
795 
796 	while (node) {
797 		va = rb_entry(node, struct vmap_area, rb_node);
798 
799 		if (get_subtree_max_size(node->rb_left) >= length &&
800 				vstart < va->va_start) {
801 			node = node->rb_left;
802 		} else {
803 			if (is_within_this_va(va, size, align, vstart))
804 				return va;
805 
806 			/*
807 			 * It does not make sense to go deeper into the right
808 			 * sub-tree if it does not have a free block that is
809 			 * equal to or bigger than the requested search length.
810 			 */
811 			if (get_subtree_max_size(node->rb_right) >= length) {
812 				node = node->rb_right;
813 				continue;
814 			}
815 
816 			/*
817 			 * OK. We roll back and find the first right sub-tree
818 			 * that satisfies the search criteria. This can happen
819 			 * only once due to the "vstart" restriction.
820 			 */
821 			while ((node = rb_parent(node))) {
822 				va = rb_entry(node, struct vmap_area, rb_node);
823 				if (is_within_this_va(va, size, align, vstart))
824 					return va;
825 
826 				if (get_subtree_max_size(node->rb_right) >= length &&
827 						vstart <= va->va_start) {
828 					node = node->rb_right;
829 					break;
830 				}
831 			}
832 		}
833 	}
834 
835 	return NULL;
836 }
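
/*
 * Worked example of the "length" adjustment above (illustrative numbers):
 * for a request of size 8192 bytes with align 16384, the search length
 * becomes 8192 + 16384 - 1 bytes. Any free block at least that long is
 * guaranteed to contain a 16384-aligned start address followed by 8192
 * free bytes, so such a block always satisfies the aligned request.
 */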
837 
838 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
839 #include <linux/random.h>
840 
841 static struct vmap_area *
842 find_vmap_lowest_linear_match(unsigned long size,
843 	unsigned long align, unsigned long vstart)
844 {
845 	struct vmap_area *va;
846 
847 	list_for_each_entry(va, &free_vmap_area_list, list) {
848 		if (!is_within_this_va(va, size, align, vstart))
849 			continue;
850 
851 		return va;
852 	}
853 
854 	return NULL;
855 }
856 
857 static void
858 find_vmap_lowest_match_check(unsigned long size)
859 {
860 	struct vmap_area *va_1, *va_2;
861 	unsigned long vstart;
862 	unsigned int rnd;
863 
864 	get_random_bytes(&rnd, sizeof(rnd));
865 	vstart = VMALLOC_START + rnd;
866 
867 	va_1 = find_vmap_lowest_match(size, 1, vstart);
868 	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
869 
870 	if (va_1 != va_2)
871 		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
872 			va_1, va_2, vstart);
873 }
874 #endif
875 
876 enum fit_type {
877 	NOTHING_FIT = 0,
878 	FL_FIT_TYPE = 1,	/* full fit */
879 	LE_FIT_TYPE = 2,	/* left edge fit */
880 	RE_FIT_TYPE = 3,	/* right edge fit */
881 	NE_FIT_TYPE = 4		/* no edge fit */
882 };
883 
884 static __always_inline enum fit_type
885 classify_va_fit_type(struct vmap_area *va,
886 	unsigned long nva_start_addr, unsigned long size)
887 {
888 	enum fit_type type;
889 
890 	/* Check if it is within VA. */
891 	if (nva_start_addr < va->va_start ||
892 			nva_start_addr + size > va->va_end)
893 		return NOTHING_FIT;
894 
895 	/* Now classify. */
896 	if (va->va_start == nva_start_addr) {
897 		if (va->va_end == nva_start_addr + size)
898 			type = FL_FIT_TYPE;
899 		else
900 			type = LE_FIT_TYPE;
901 	} else if (va->va_end == nva_start_addr + size) {
902 		type = RE_FIT_TYPE;
903 	} else {
904 		type = NE_FIT_TYPE;
905 	}
906 
907 	return type;
908 }
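
/*
 * Worked example (illustrative addresses): for a free VA covering
 * [0x1000, 0x5000), an allocation of:
 *
 *	[0x1000, 0x5000) is FL_FIT_TYPE - consumes the whole block
 *	[0x1000, 0x3000) is LE_FIT_TYPE - cuts off the left edge
 *	[0x3000, 0x5000) is RE_FIT_TYPE - cuts off the right edge
 *	[0x2000, 0x4000) is NE_FIT_TYPE - splits the block in two
 */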
909 
910 static __always_inline int
911 adjust_va_to_fit_type(struct vmap_area *va,
912 	unsigned long nva_start_addr, unsigned long size,
913 	enum fit_type type)
914 {
915 	struct vmap_area *lva;
916 
917 	if (type == FL_FIT_TYPE) {
918 		/*
919 		 * No need to split VA, it fully fits.
920 		 *
921 		 * |               |
922 		 * V      NVA      V
923 		 * |---------------|
924 		 */
925 		unlink_va(va, &free_vmap_area_root);
926 		kmem_cache_free(vmap_area_cachep, va);
927 	} else if (type == LE_FIT_TYPE) {
928 		/*
929 		 * Split left edge of fit VA.
930 		 *
931 		 * |       |
932 		 * V  NVA  V   R
933 		 * |-------|-------|
934 		 */
935 		va->va_start += size;
936 	} else if (type == RE_FIT_TYPE) {
937 		/*
938 		 * Split right edge of fit VA.
939 		 *
940 		 *         |       |
941 		 *     L   V  NVA  V
942 		 * |-------|-------|
943 		 */
944 		va->va_end = nva_start_addr;
945 	} else if (type == NE_FIT_TYPE) {
946 		/*
947 		 * Split no edge of fit VA.
948 		 *
949 		 *     |       |
950 		 *   L V  NVA  V R
951 		 * |---|-------|---|
952 		 */
953 		lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
954 		if (unlikely(!lva))
955 			return -1;
956 
957 		/*
958 		 * Build the remainder.
959 		 */
960 		lva->va_start = va->va_start;
961 		lva->va_end = nva_start_addr;
962 
963 		/*
964 		 * Shrink this VA to remaining size.
965 		 */
966 		va->va_start = nva_start_addr + size;
967 	} else {
968 		return -1;
969 	}
970 
971 	if (type != FL_FIT_TYPE) {
972 		augment_tree_propagate_from(va);
973 
974 		if (type == NE_FIT_TYPE)
975 			insert_vmap_area_augment(lva, &va->rb_node,
976 				&free_vmap_area_root, &free_vmap_area_list);
977 	}
978 
979 	return 0;
980 }
981 
982 /*
983  * Returns the start address of the newly allocated area on success.
984  * Otherwise "vend" is returned to indicate failure.
985  */
986 static __always_inline unsigned long
987 __alloc_vmap_area(unsigned long size, unsigned long align,
988 	unsigned long vstart, unsigned long vend, int node)
989 {
990 	unsigned long nva_start_addr;
991 	struct vmap_area *va;
992 	enum fit_type type;
993 	int ret;
994 
995 	va = find_vmap_lowest_match(size, align, vstart);
996 	if (unlikely(!va))
997 		return vend;
998 
999 	if (va->va_start > vstart)
1000 		nva_start_addr = ALIGN(va->va_start, align);
1001 	else
1002 		nva_start_addr = ALIGN(vstart, align);
1003 
1004 	/* Check the "vend" restriction. */
1005 	if (nva_start_addr + size > vend)
1006 		return vend;
1007 
1008 	/* Classify what we have found. */
1009 	type = classify_va_fit_type(va, nva_start_addr, size);
1010 	if (WARN_ON_ONCE(type == NOTHING_FIT))
1011 		return vend;
1012 
1013 	/* Update the free vmap_area. */
1014 	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1015 	if (ret)
1016 		return vend;
1017 
1018 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1019 	find_vmap_lowest_match_check(size);
1020 #endif
1021 
1022 	return nva_start_addr;
1023 }
1024 
1025 /*
1026  * Allocate a region of KVA of the specified size and alignment, within
1027  * the range [vstart, vend).
1028  */
1029 static struct vmap_area *alloc_vmap_area(unsigned long size,
1030 				unsigned long align,
1031 				unsigned long vstart, unsigned long vend,
1032 				int node, gfp_t gfp_mask)
1033 {
1034 	struct vmap_area *va;
1035 	unsigned long addr;
1036 	int purged = 0;
1037 
1038 	BUG_ON(!size);
1039 	BUG_ON(offset_in_page(size));
1040 	BUG_ON(!is_power_of_2(align));
1041 
1042 	if (unlikely(!vmap_initialized))
1043 		return ERR_PTR(-EBUSY);
1044 
1045 	might_sleep();
1046 
1047 	va = kmem_cache_alloc_node(vmap_area_cachep,
1048 			gfp_mask & GFP_RECLAIM_MASK, node);
1049 	if (unlikely(!va))
1050 		return ERR_PTR(-ENOMEM);
1051 
1052 	/*
1053 	 * Only scan the relevant parts containing pointers to other objects
1054 	 * to avoid false negatives.
1055 	 */
1056 	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
1057 
1058 retry:
1059 	spin_lock(&vmap_area_lock);
1060 
1061 	/*
1062 	 * If the allocation fails, the "vend" address is
1063 	 * returned. In that case, take the overflow path.
1064 	 */
1065 	addr = __alloc_vmap_area(size, align, vstart, vend, node);
1066 	if (unlikely(addr == vend))
1067 		goto overflow;
1068 
1069 	va->va_start = addr;
1070 	va->va_end = addr + size;
1071 	va->flags = 0;
1072 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1073 
1074 	spin_unlock(&vmap_area_lock);
1075 
1076 	BUG_ON(!IS_ALIGNED(va->va_start, align));
1077 	BUG_ON(va->va_start < vstart);
1078 	BUG_ON(va->va_end > vend);
1079 
1080 	return va;
1081 
1082 overflow:
1083 	spin_unlock(&vmap_area_lock);
1084 	if (!purged) {
1085 		purge_vmap_area_lazy();
1086 		purged = 1;
1087 		goto retry;
1088 	}
1089 
1090 	if (gfpflags_allow_blocking(gfp_mask)) {
1091 		unsigned long freed = 0;
1092 		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1093 		if (freed > 0) {
1094 			purged = 0;
1095 			goto retry;
1096 		}
1097 	}
1098 
1099 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1100 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1101 			size);
1102 
1103 	kmem_cache_free(vmap_area_cachep, va);
1104 	return ERR_PTR(-EBUSY);
1105 }
1106 
1107 int register_vmap_purge_notifier(struct notifier_block *nb)
1108 {
1109 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
1110 }
1111 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1112 
1113 int unregister_vmap_purge_notifier(struct notifier_block *nb)
1114 {
1115 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1116 }
1117 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
1118 
1119 static void __free_vmap_area(struct vmap_area *va)
1120 {
1121 	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
1122 
1123 	/*
1124 	 * Remove from the busy tree/list.
1125 	 */
1126 	unlink_va(va, &vmap_area_root);
1127 
1128 	/*
1129 	 * Merge VA with its neighbors, otherwise just add it.
1130 	 */
1131 	merge_or_add_vmap_area(va,
1132 		&free_vmap_area_root, &free_vmap_area_list);
1133 }
1134 
1135 /*
1136  * Free a region of KVA allocated by alloc_vmap_area
1137  */
1138 static void free_vmap_area(struct vmap_area *va)
1139 {
1140 	spin_lock(&vmap_area_lock);
1141 	__free_vmap_area(va);
1142 	spin_unlock(&vmap_area_lock);
1143 }
1144 
1145 /*
1146  * Clear the pagetable entries of a given vmap_area
1147  */
1148 static void unmap_vmap_area(struct vmap_area *va)
1149 {
1150 	vunmap_page_range(va->va_start, va->va_end);
1151 }
1152 
1153 /*
1154  * lazy_max_pages is the maximum amount of virtual address space we gather up
1155  * before attempting to purge with a TLB flush.
1156  *
1157  * There is a tradeoff here: a larger number will cover more kernel page tables
1158  * and take slightly longer to purge, but it will linearly reduce the number of
1159  * global TLB flushes that must be performed. It would seem natural to scale
1160  * this number up linearly with the number of CPUs (because vmapping activity
1161  * could also scale linearly with the number of CPUs), however it is likely
1162  * that in practice, workloads might be constrained in other ways that mean
1163  * vmap activity will not scale linearly with CPUs. Also, I want to be
1164  * conservative and not introduce a big latency on huge systems, so go with
1165  * a less aggressive log scale. It will still be an improvement over the old
1166  * code, and it will be simple to change the scale factor if we find that it
1167  * becomes a problem on bigger systems.
1168  */
1169 static unsigned long lazy_max_pages(void)
1170 {
1171 	unsigned int log;
1172 
1173 	log = fls(num_online_cpus());
1174 
1175 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1176 }
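
/*
 * Worked example (assuming 4 KiB pages): with 16 online CPUs, fls(16) is 5,
 * so lazy_max_pages() returns 5 * (32 MiB / 4 KiB) = 40960 pages, i.e.
 * roughly 160 MiB of lazily freed virtual address space may accumulate
 * before a purge and TLB flush is attempted.
 */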
1177 
1178 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1179 
1180 /*
1181  * Serialize vmap purging.  There is no actual critical section protected
1182  * by this lock, but we want to avoid concurrent calls for performance
1183  * reasons and to make pcpu_get_vm_areas() more deterministic.
1184  */
1185 static DEFINE_MUTEX(vmap_purge_lock);
1186 
1187 /* for per-CPU blocks */
1188 static void purge_fragmented_blocks_allcpus(void);
1189 
1190 /*
1191  * Called before a call to iounmap() if the caller wants the vm_area_structs
1192  * to be freed immediately.
1193  */
1194 void set_iounmap_nonlazy(void)
1195 {
1196 	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1197 }
1198 
1199 /*
1200  * Purges all lazily-freed vmap areas.
1201  */
1202 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1203 {
1204 	unsigned long resched_threshold;
1205 	struct llist_node *valist;
1206 	struct vmap_area *va;
1207 	struct vmap_area *n_va;
1208 
1209 	lockdep_assert_held(&vmap_purge_lock);
1210 
1211 	valist = llist_del_all(&vmap_purge_list);
1212 	if (unlikely(valist == NULL))
1213 		return false;
1214 
1215 	/*
1216 	 * TODO: calculate the flush range without looping.
1217 	 * The list can be up to lazy_max_pages() elements.
1218 	 */
1219 	llist_for_each_entry(va, valist, purge_list) {
1220 		if (va->va_start < start)
1221 			start = va->va_start;
1222 		if (va->va_end > end)
1223 			end = va->va_end;
1224 	}
1225 
1226 	flush_tlb_kernel_range(start, end);
1227 	resched_threshold = lazy_max_pages() << 1;
1228 
1229 	spin_lock(&vmap_area_lock);
1230 	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
1231 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1232 
1233 		__free_vmap_area(va);
1234 		atomic_long_sub(nr, &vmap_lazy_nr);
1235 
1236 		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1237 			cond_resched_lock(&vmap_area_lock);
1238 	}
1239 	spin_unlock(&vmap_area_lock);
1240 	return true;
1241 }
1242 
1243 /*
1244  * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1245  * is already purging.
1246  */
1247 static void try_purge_vmap_area_lazy(void)
1248 {
1249 	if (mutex_trylock(&vmap_purge_lock)) {
1250 		__purge_vmap_area_lazy(ULONG_MAX, 0);
1251 		mutex_unlock(&vmap_purge_lock);
1252 	}
1253 }
1254 
1255 /*
1256  * Kick off a purge of the outstanding lazy areas.
1257  */
1258 static void purge_vmap_area_lazy(void)
1259 {
1260 	mutex_lock(&vmap_purge_lock);
1261 	purge_fragmented_blocks_allcpus();
1262 	__purge_vmap_area_lazy(ULONG_MAX, 0);
1263 	mutex_unlock(&vmap_purge_lock);
1264 }
1265 
1266 /*
1267  * Free a vmap area, the caller ensuring that the area has been unmapped
1268  * and flush_cache_vunmap has been called for the correct range
1269  * previously.
1270  */
1271 static void free_vmap_area_noflush(struct vmap_area *va)
1272 {
1273 	unsigned long nr_lazy;
1274 
1275 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1276 				PAGE_SHIFT, &vmap_lazy_nr);
1277 
1278 	/* After this point, we may free va at any time */
1279 	llist_add(&va->purge_list, &vmap_purge_list);
1280 
1281 	if (unlikely(nr_lazy > lazy_max_pages()))
1282 		try_purge_vmap_area_lazy();
1283 }
1284 
1285 /*
1286  * Free and unmap a vmap area
1287  */
1288 static void free_unmap_vmap_area(struct vmap_area *va)
1289 {
1290 	flush_cache_vunmap(va->va_start, va->va_end);
1291 	unmap_vmap_area(va);
1292 	if (debug_pagealloc_enabled())
1293 		flush_tlb_kernel_range(va->va_start, va->va_end);
1294 
1295 	free_vmap_area_noflush(va);
1296 }
1297 
1298 static struct vmap_area *find_vmap_area(unsigned long addr)
1299 {
1300 	struct vmap_area *va;
1301 
1302 	spin_lock(&vmap_area_lock);
1303 	va = __find_vmap_area(addr);
1304 	spin_unlock(&vmap_area_lock);
1305 
1306 	return va;
1307 }
1308 
1309 /*** Per cpu kva allocator ***/
1310 
1311 /*
1312  * vmap space is limited, especially on 32-bit architectures. Ensure there is
1313  * room for at least 16 percpu vmap blocks per CPU.
1314  */
1315 /*
1316  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1317  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
1318  * instead (we just need a rough idea)
1319  */
1320 #if BITS_PER_LONG == 32
1321 #define VMALLOC_SPACE		(128UL*1024*1024)
1322 #else
1323 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
1324 #endif
1325 
1326 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1327 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1328 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1329 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1330 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1331 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1332 #define VMAP_BBMAP_BITS		\
1333 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1334 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1335 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1336 
1337 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
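
/*
 * Worked example (assuming 4 KiB pages): on a 64-bit kernel built with
 * NR_CPUS == 64, VMALLOC_PAGES is 128 GiB / 4 KiB == 32M pages, so
 * VMALLOC_PAGES / 64 / 16 == 32768 bits, which is clamped by
 * VMAP_BBMAP_BITS_MAX down to 1024 bits. VMAP_BLOCK_SIZE is then
 * 1024 * 4 KiB == 4 MiB per vmap block.
 */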
1338 
1339 struct vmap_block_queue {
1340 	spinlock_t lock;
1341 	struct list_head free;
1342 };
1343 
1344 struct vmap_block {
1345 	spinlock_t lock;
1346 	struct vmap_area *va;
1347 	unsigned long free, dirty;
1348 	unsigned long dirty_min, dirty_max; /*< dirty range */
1349 	struct list_head free_list;
1350 	struct rcu_head rcu_head;
1351 	struct list_head purge;
1352 };
1353 
1354 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1355 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1356 
1357 /*
1358  * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
1359  * in the free path. Could get rid of this if we change the API to return a
1360  * "cookie" from alloc, to be passed to free. But no big deal yet.
1361  */
1362 static DEFINE_SPINLOCK(vmap_block_tree_lock);
1363 static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
1364 
1365 /*
1366  * We should probably have a fallback mechanism to allocate virtual memory
1367  * out of partially filled vmap blocks. However vmap block sizing should be
1368  * fairly reasonable according to the vmalloc size, so it shouldn't be a
1369  * big problem.
1370  */
1371 
1372 static unsigned long addr_to_vb_idx(unsigned long addr)
1373 {
1374 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1375 	addr /= VMAP_BLOCK_SIZE;
1376 	return addr;
1377 }
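
/*
 * For example (hypothetical addresses, 4 MiB blocks): if VMALLOC_START is
 * 0xffffc90000000000, which is already VMAP_BLOCK_SIZE aligned, then
 * addr_to_vb_idx(VMALLOC_START) == 0 and
 * addr_to_vb_idx(VMALLOC_START + 5 * 1024 * 1024) == 1. Every address
 * inside the same block maps to the same radix-tree index.
 */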
1378 
1379 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1380 {
1381 	unsigned long addr;
1382 
1383 	addr = va_start + (pages_off << PAGE_SHIFT);
1384 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1385 	return (void *)addr;
1386 }
1387 
1388 /**
1389  * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in this
1390  *                  block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
1391  * @order:    2^order pages will be occupied in the newly allocated block
1392  * @gfp_mask: flags for the page level allocator
1393  *
1394  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1395  */
1396 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1397 {
1398 	struct vmap_block_queue *vbq;
1399 	struct vmap_block *vb;
1400 	struct vmap_area *va;
1401 	unsigned long vb_idx;
1402 	int node, err;
1403 	void *vaddr;
1404 
1405 	node = numa_node_id();
1406 
1407 	vb = kmalloc_node(sizeof(struct vmap_block),
1408 			gfp_mask & GFP_RECLAIM_MASK, node);
1409 	if (unlikely(!vb))
1410 		return ERR_PTR(-ENOMEM);
1411 
1412 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1413 					VMALLOC_START, VMALLOC_END,
1414 					node, gfp_mask);
1415 	if (IS_ERR(va)) {
1416 		kfree(vb);
1417 		return ERR_CAST(va);
1418 	}
1419 
1420 	err = radix_tree_preload(gfp_mask);
1421 	if (unlikely(err)) {
1422 		kfree(vb);
1423 		free_vmap_area(va);
1424 		return ERR_PTR(err);
1425 	}
1426 
1427 	vaddr = vmap_block_vaddr(va->va_start, 0);
1428 	spin_lock_init(&vb->lock);
1429 	vb->va = va;
1430 	/* At least something should be left free */
1431 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1432 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1433 	vb->dirty = 0;
1434 	vb->dirty_min = VMAP_BBMAP_BITS;
1435 	vb->dirty_max = 0;
1436 	INIT_LIST_HEAD(&vb->free_list);
1437 
1438 	vb_idx = addr_to_vb_idx(va->va_start);
1439 	spin_lock(&vmap_block_tree_lock);
1440 	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
1441 	spin_unlock(&vmap_block_tree_lock);
1442 	BUG_ON(err);
1443 	radix_tree_preload_end();
1444 
1445 	vbq = &get_cpu_var(vmap_block_queue);
1446 	spin_lock(&vbq->lock);
1447 	list_add_tail_rcu(&vb->free_list, &vbq->free);
1448 	spin_unlock(&vbq->lock);
1449 	put_cpu_var(vmap_block_queue);
1450 
1451 	return vaddr;
1452 }
1453 
1454 static void free_vmap_block(struct vmap_block *vb)
1455 {
1456 	struct vmap_block *tmp;
1457 	unsigned long vb_idx;
1458 
1459 	vb_idx = addr_to_vb_idx(vb->va->va_start);
1460 	spin_lock(&vmap_block_tree_lock);
1461 	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
1462 	spin_unlock(&vmap_block_tree_lock);
1463 	BUG_ON(tmp != vb);
1464 
1465 	free_vmap_area_noflush(vb->va);
1466 	kfree_rcu(vb, rcu_head);
1467 }
1468 
1469 static void purge_fragmented_blocks(int cpu)
1470 {
1471 	LIST_HEAD(purge);
1472 	struct vmap_block *vb;
1473 	struct vmap_block *n_vb;
1474 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1475 
1476 	rcu_read_lock();
1477 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1478 
1479 		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1480 			continue;
1481 
1482 		spin_lock(&vb->lock);
1483 		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1484 			vb->free = 0; /* prevent further allocs after releasing lock */
1485 			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1486 			vb->dirty_min = 0;
1487 			vb->dirty_max = VMAP_BBMAP_BITS;
1488 			spin_lock(&vbq->lock);
1489 			list_del_rcu(&vb->free_list);
1490 			spin_unlock(&vbq->lock);
1491 			spin_unlock(&vb->lock);
1492 			list_add_tail(&vb->purge, &purge);
1493 		} else
1494 			spin_unlock(&vb->lock);
1495 	}
1496 	rcu_read_unlock();
1497 
1498 	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1499 		list_del(&vb->purge);
1500 		free_vmap_block(vb);
1501 	}
1502 }
1503 
1504 static void purge_fragmented_blocks_allcpus(void)
1505 {
1506 	int cpu;
1507 
1508 	for_each_possible_cpu(cpu)
1509 		purge_fragmented_blocks(cpu);
1510 }
1511 
1512 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1513 {
1514 	struct vmap_block_queue *vbq;
1515 	struct vmap_block *vb;
1516 	void *vaddr = NULL;
1517 	unsigned int order;
1518 
1519 	BUG_ON(offset_in_page(size));
1520 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1521 	if (WARN_ON(size == 0)) {
1522 		/*
1523 		 * Allocating 0 bytes isn't what the caller wants, since
1524 		 * get_order(0) returns a funny result. Just warn and
1525 		 * terminate early.
1526 		 */
1527 		return NULL;
1528 	}
1529 	order = get_order(size);
1530 
1531 	rcu_read_lock();
1532 	vbq = &get_cpu_var(vmap_block_queue);
1533 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1534 		unsigned long pages_off;
1535 
1536 		spin_lock(&vb->lock);
1537 		if (vb->free < (1UL << order)) {
1538 			spin_unlock(&vb->lock);
1539 			continue;
1540 		}
1541 
1542 		pages_off = VMAP_BBMAP_BITS - vb->free;
1543 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1544 		vb->free -= 1UL << order;
1545 		if (vb->free == 0) {
1546 			spin_lock(&vbq->lock);
1547 			list_del_rcu(&vb->free_list);
1548 			spin_unlock(&vbq->lock);
1549 		}
1550 
1551 		spin_unlock(&vb->lock);
1552 		break;
1553 	}
1554 
1555 	put_cpu_var(vmap_block_queue);
1556 	rcu_read_unlock();
1557 
1558 	/* Allocate new block if nothing was found */
1559 	if (!vaddr)
1560 		vaddr = new_vmap_block(order, gfp_mask);
1561 
1562 	return vaddr;
1563 }
1564 
1565 static void vb_free(const void *addr, unsigned long size)
1566 {
1567 	unsigned long offset;
1568 	unsigned long vb_idx;
1569 	unsigned int order;
1570 	struct vmap_block *vb;
1571 
1572 	BUG_ON(offset_in_page(size));
1573 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1574 
1575 	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
1576 
1577 	order = get_order(size);
1578 
1579 	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
1580 	offset >>= PAGE_SHIFT;
1581 
1582 	vb_idx = addr_to_vb_idx((unsigned long)addr);
1583 	rcu_read_lock();
1584 	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
1585 	rcu_read_unlock();
1586 	BUG_ON(!vb);
1587 
1588 	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
1589 
1590 	if (debug_pagealloc_enabled())
1591 		flush_tlb_kernel_range((unsigned long)addr,
1592 					(unsigned long)addr + size);
1593 
1594 	spin_lock(&vb->lock);
1595 
1596 	/* Expand dirty range */
1597 	vb->dirty_min = min(vb->dirty_min, offset);
1598 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1599 
1600 	vb->dirty += 1UL << order;
1601 	if (vb->dirty == VMAP_BBMAP_BITS) {
1602 		BUG_ON(vb->free);
1603 		spin_unlock(&vb->lock);
1604 		free_vmap_block(vb);
1605 	} else
1606 		spin_unlock(&vb->lock);
1607 }
1608 
1609 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
1610 {
1611 	int cpu;
1612 
1613 	if (unlikely(!vmap_initialized))
1614 		return;
1615 
1616 	might_sleep();
1617 
1618 	for_each_possible_cpu(cpu) {
1619 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1620 		struct vmap_block *vb;
1621 
1622 		rcu_read_lock();
1623 		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1624 			spin_lock(&vb->lock);
1625 			if (vb->dirty) {
1626 				unsigned long va_start = vb->va->va_start;
1627 				unsigned long s, e;
1628 
1629 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
1630 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
1631 
1632 				start = min(s, start);
1633 				end   = max(e, end);
1634 
1635 				flush = 1;
1636 			}
1637 			spin_unlock(&vb->lock);
1638 		}
1639 		rcu_read_unlock();
1640 	}
1641 
1642 	mutex_lock(&vmap_purge_lock);
1643 	purge_fragmented_blocks_allcpus();
1644 	if (!__purge_vmap_area_lazy(start, end) && flush)
1645 		flush_tlb_kernel_range(start, end);
1646 	mutex_unlock(&vmap_purge_lock);
1647 }
1648 
1649 /**
1650  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1651  *
1652  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1653  * to amortize TLB flushing overheads. What this means is that any page you
1654  * have now may, in a former life, have been mapped into a kernel virtual
1655  * address by the vmap layer, so there might be some CPUs with TLB entries
1656  * still referencing that page (in addition to the regular 1:1 kernel mapping).
1657  *
1658  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1659  * be sure that none of the pages we have control over will have any aliases
1660  * from the vmap layer.
1661  */
1662 void vm_unmap_aliases(void)
1663 {
1664 	unsigned long start = ULONG_MAX, end = 0;
1665 	int flush = 0;
1666 
1667 	_vm_unmap_aliases(start, end, flush);
1668 }
1669 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1670 
1671 /**
1672  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1673  * @mem: the pointer returned by vm_map_ram
1674  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1675  */
1676 void vm_unmap_ram(const void *mem, unsigned int count)
1677 {
1678 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1679 	unsigned long addr = (unsigned long)mem;
1680 	struct vmap_area *va;
1681 
1682 	might_sleep();
1683 	BUG_ON(!addr);
1684 	BUG_ON(addr < VMALLOC_START);
1685 	BUG_ON(addr > VMALLOC_END);
1686 	BUG_ON(!PAGE_ALIGNED(addr));
1687 
1688 	if (likely(count <= VMAP_MAX_ALLOC)) {
1689 		debug_check_no_locks_freed(mem, size);
1690 		vb_free(mem, size);
1691 		return;
1692 	}
1693 
1694 	va = find_vmap_area(addr);
1695 	BUG_ON(!va);
1696 	debug_check_no_locks_freed((void *)va->va_start,
1697 				    (va->va_end - va->va_start));
1698 	free_unmap_vmap_area(va);
1699 }
1700 EXPORT_SYMBOL(vm_unmap_ram);
1701 
1702 /**
1703  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1704  * @pages: an array of pointers to the pages to be mapped
1705  * @count: number of pages
1706  * @node: prefer to allocate data structures on this node
1707  * @prot: memory protection to use. PAGE_KERNEL for regular RAM
1708  *
1709  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
1710  * faster than vmap so it's good.  But if you mix long-life and short-life
1711  * objects with vm_map_ram(), it could consume lots of address space through
1712  * fragmentation (especially on a 32bit machine).  You could see failures in
1713  * the end.  Please use this function for short-lived objects.
1714  *
1715  * Returns: a pointer to the address that has been mapped, or %NULL on failure
1716  */
1717 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1718 {
1719 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1720 	unsigned long addr;
1721 	void *mem;
1722 
1723 	if (likely(count <= VMAP_MAX_ALLOC)) {
1724 		mem = vb_alloc(size, GFP_KERNEL);
1725 		if (IS_ERR(mem))
1726 			return NULL;
1727 		addr = (unsigned long)mem;
1728 	} else {
1729 		struct vmap_area *va;
1730 		va = alloc_vmap_area(size, PAGE_SIZE,
1731 				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1732 		if (IS_ERR(va))
1733 			return NULL;
1734 
1735 		addr = va->va_start;
1736 		mem = (void *)addr;
1737 	}
1738 	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1739 		vm_unmap_ram(mem, count);
1740 		return NULL;
1741 	}
1742 	return mem;
1743 }
1744 EXPORT_SYMBOL(vm_map_ram);
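
/*
 * Illustrative usage (sketch only; "pages" and "nr" are assumed to be a
 * caller-owned page array and its length, with nr <= VMAP_MAX_ALLOC so the
 * per-cpu vmap block fast path is used):
 *
 *	void *addr = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	memset(addr, 0, nr * PAGE_SIZE);
 *	vm_unmap_ram(addr, nr);
 *
 * The count passed to vm_unmap_ram() must match the count passed to
 * vm_map_ram(); partial unmaps are not supported.
 */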
1745 
1746 static struct vm_struct *vmlist __initdata;
1747 
1748 /**
1749  * vm_area_add_early - add vmap area early during boot
1750  * @vm: vm_struct to add
1751  *
1752  * This function is used to add a fixed kernel vm area to vmlist before
1753  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
1754  * should contain proper values and the other fields should be zero.
1755  *
1756  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1757  */
1758 void __init vm_area_add_early(struct vm_struct *vm)
1759 {
1760 	struct vm_struct *tmp, **p;
1761 
1762 	BUG_ON(vmap_initialized);
1763 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1764 		if (tmp->addr >= vm->addr) {
1765 			BUG_ON(tmp->addr < vm->addr + vm->size);
1766 			break;
1767 		} else
1768 			BUG_ON(tmp->addr + tmp->size > vm->addr);
1769 	}
1770 	vm->next = *p;
1771 	*p = vm;
1772 }
1773 
1774 /**
1775  * vm_area_register_early - register vmap area early during boot
1776  * @vm: vm_struct to register
1777  * @align: requested alignment
1778  *
1779  * This function is used to register kernel vm area before
1780  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
1781  * proper values on entry and other fields should be zero.  On return,
1782  * vm->addr contains the allocated address.
1783  *
1784  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1785  */
1786 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1787 {
1788 	static size_t vm_init_off __initdata;
1789 	unsigned long addr;
1790 
1791 	addr = ALIGN(VMALLOC_START + vm_init_off, align);
1792 	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1793 
1794 	vm->addr = (void *)addr;
1795 
1796 	vm_area_add_early(vm);
1797 }
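
/*
 * Illustrative boot-time usage (hypothetical caller, sizes are examples
 * only): reserving a fixed chunk of vmalloc space before vmalloc_init()
 * runs:
 *
 *	static struct vm_struct early_area;
 *
 *	early_area.size = 2UL << 20;
 *	early_area.flags = VM_ALLOC;
 *	vm_area_register_early(&early_area, PAGE_SIZE);
 *
 * On return, early_area.addr holds the reserved kernel virtual address;
 * the entry is later imported into the vmap area tree by vmalloc_init().
 */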
1798 
1799 static void vmap_init_free_space(void)
1800 {
1801 	unsigned long vmap_start = 1;
1802 	const unsigned long vmap_end = ULONG_MAX;
1803 	struct vmap_area *busy, *free;
1804 
1805 	/*
1806 	 *     B     F     B     B     B     F
1807 	 * -|-----|.....|-----|-----|-----|.....|-
1808 	 *  |           The KVA space           |
1809 	 *  |<--------------------------------->|
1810 	 */
1811 	list_for_each_entry(busy, &vmap_area_list, list) {
1812 		if (busy->va_start - vmap_start > 0) {
1813 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1814 			if (!WARN_ON_ONCE(!free)) {
1815 				free->va_start = vmap_start;
1816 				free->va_end = busy->va_start;
1817 
1818 				insert_vmap_area_augment(free, NULL,
1819 					&free_vmap_area_root,
1820 						&free_vmap_area_list);
1821 			}
1822 		}
1823 
1824 		vmap_start = busy->va_end;
1825 	}
1826 
1827 	if (vmap_end - vmap_start > 0) {
1828 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1829 		if (!WARN_ON_ONCE(!free)) {
1830 			free->va_start = vmap_start;
1831 			free->va_end = vmap_end;
1832 
1833 			insert_vmap_area_augment(free, NULL,
1834 				&free_vmap_area_root,
1835 					&free_vmap_area_list);
1836 		}
1837 	}
1838 }
1839 
1840 void __init vmalloc_init(void)
1841 {
1842 	struct vmap_area *va;
1843 	struct vm_struct *tmp;
1844 	int i;
1845 
1846 	/*
1847 	 * Create the cache for vmap_area objects.
1848 	 */
1849 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
1850 
1851 	for_each_possible_cpu(i) {
1852 		struct vmap_block_queue *vbq;
1853 		struct vfree_deferred *p;
1854 
1855 		vbq = &per_cpu(vmap_block_queue, i);
1856 		spin_lock_init(&vbq->lock);
1857 		INIT_LIST_HEAD(&vbq->free);
1858 		p = &per_cpu(vfree_deferred, i);
1859 		init_llist_head(&p->list);
1860 		INIT_WORK(&p->wq, free_work);
1861 	}
1862 
1863 	/* Import existing vmlist entries. */
1864 	for (tmp = vmlist; tmp; tmp = tmp->next) {
1865 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1866 		if (WARN_ON_ONCE(!va))
1867 			continue;
1868 
1869 		va->flags = VM_VM_AREA;
1870 		va->va_start = (unsigned long)tmp->addr;
1871 		va->va_end = va->va_start + tmp->size;
1872 		va->vm = tmp;
1873 		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1874 	}
1875 
1876 	/*
1877 	 * Now we can initialize a free vmap space.
1878 	 */
1879 	vmap_init_free_space();
1880 	vmap_initialized = true;
1881 }
1882 
1883 /**
1884  * map_kernel_range_noflush - map kernel VM area with the specified pages
1885  * @addr: start of the VM area to map
1886  * @size: size of the VM area to map
1887  * @prot: page protection flags to use
1888  * @pages: pages to map
1889  *
1890  * Map PFN_UP(@size) pages at @addr.  The VM area that @addr and @size
1891  * specify should have been allocated using get_vm_area() and its
1892  * friends.
1893  *
1894  * NOTE:
1895  * This function does NOT do any cache flushing.  The caller is
1896  * responsible for calling flush_cache_vmap() on to-be-mapped areas
1897  * before calling this function.
1898  *
1899  * RETURNS:
1900  * The number of pages mapped on success, -errno on failure.
1901  */
1902 int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1903 			     pgprot_t prot, struct page **pages)
1904 {
1905 	return vmap_page_range_noflush(addr, addr + size, prot, pages);
1906 }
1907 
1908 /**
1909  * unmap_kernel_range_noflush - unmap kernel VM area
1910  * @addr: start of the VM area to unmap
1911  * @size: size of the VM area to unmap
1912  *
1913  * Unmap PFN_UP(@size) pages at @addr.  The VM area that @addr and @size
1914  * specify should have been allocated using get_vm_area() and its
1915  * friends.
1916  *
1917  * NOTE:
1918  * This function does NOT do any cache flushing.  The caller is
1919  * responsible for calling flush_cache_vunmap() on to-be-mapped areas
1920  * before calling this function and flush_tlb_kernel_range() after.
1921  */
1922 void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1923 {
1924 	vunmap_page_range(addr, addr + size);
1925 }
1926 EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
1927 
1928 /**
1929  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1930  * @addr: start of the VM area to unmap
1931  * @size: size of the VM area to unmap
1932  *
1933  * Similar to unmap_kernel_range_noflush() but flushes vcache before
1934  * the unmapping and tlb after.
1935  */
1936 void unmap_kernel_range(unsigned long addr, unsigned long size)
1937 {
1938 	unsigned long end = addr + size;
1939 
1940 	flush_cache_vunmap(addr, end);
1941 	vunmap_page_range(addr, end);
1942 	flush_tlb_kernel_range(addr, end);
1943 }
1944 EXPORT_SYMBOL_GPL(unmap_kernel_range);
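/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to pair the *_noflush helpers above with the cache and TLB
 * maintenance that the comments demand.  example_map()/example_unmap()
 * and their parameters are hypothetical; the range is assumed to have
 * been reserved beforehand, e.g. with get_vm_area().
 */
#if 0
static int example_map(unsigned long addr, unsigned long size,
		       struct page **pages)
{
	int ret;

	flush_cache_vmap(addr, addr + size);	/* caller's duty, see above */
	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	return ret < 0 ? ret : 0;
}

static void example_unmap(unsigned long addr, unsigned long size)
{
	flush_cache_vunmap(addr, addr + size);		/* before unmapping */
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);	/* after unmapping */
}
#endif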
1945 
1946 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
1947 {
1948 	unsigned long addr = (unsigned long)area->addr;
1949 	unsigned long end = addr + get_vm_area_size(area);
1950 	int err;
1951 
1952 	err = vmap_page_range(addr, end, prot, pages);
1953 
1954 	return err > 0 ? 0 : err;
1955 }
1956 EXPORT_SYMBOL_GPL(map_vm_area);
1957 
1958 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1959 			      unsigned long flags, const void *caller)
1960 {
1961 	spin_lock(&vmap_area_lock);
1962 	vm->flags = flags;
1963 	vm->addr = (void *)va->va_start;
1964 	vm->size = va->va_end - va->va_start;
1965 	vm->caller = caller;
1966 	va->vm = vm;
1967 	va->flags |= VM_VM_AREA;
1968 	spin_unlock(&vmap_area_lock);
1969 }
1970 
1971 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
1972 {
1973 	/*
1974 	 * Before removing VM_UNINITIALIZED,
1975 	 * we should make sure that vm has proper values.
1976 	 * Pair with smp_rmb() in show_numa_info().
1977 	 */
1978 	smp_wmb();
1979 	vm->flags &= ~VM_UNINITIALIZED;
1980 }
1981 
1982 static struct vm_struct *__get_vm_area_node(unsigned long size,
1983 		unsigned long align, unsigned long flags, unsigned long start,
1984 		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
1985 {
1986 	struct vmap_area *va;
1987 	struct vm_struct *area;
1988 
1989 	BUG_ON(in_interrupt());
1990 	size = PAGE_ALIGN(size);
1991 	if (unlikely(!size))
1992 		return NULL;
1993 
1994 	if (flags & VM_IOREMAP)
1995 		align = 1ul << clamp_t(int, get_count_order_long(size),
1996 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
1997 
1998 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1999 	if (unlikely(!area))
2000 		return NULL;
2001 
2002 	if (!(flags & VM_NO_GUARD))
2003 		size += PAGE_SIZE;
2004 
2005 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2006 	if (IS_ERR(va)) {
2007 		kfree(area);
2008 		return NULL;
2009 	}
2010 
2011 	setup_vmalloc_vm(area, va, flags, caller);
2012 
2013 	return area;
2014 }
2015 
2016 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
2017 				unsigned long start, unsigned long end)
2018 {
2019 	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2020 				  GFP_KERNEL, __builtin_return_address(0));
2021 }
2022 EXPORT_SYMBOL_GPL(__get_vm_area);
2023 
2024 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2025 				       unsigned long start, unsigned long end,
2026 				       const void *caller)
2027 {
2028 	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2029 				  GFP_KERNEL, caller);
2030 }
2031 
2032 /**
2033  * get_vm_area - reserve a contiguous kernel virtual area
2034  * @size:	 size of the area
2035  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
2036  *
2037  * Search an area of @size in the kernel virtual mapping area,
2038  * and reserve it for our purposes.  Returns the area descriptor
2039  * on success or %NULL on failure.
2040  *
2041  * Return: the area descriptor on success or %NULL on failure.
2042  */
2043 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2044 {
2045 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2046 				  NUMA_NO_NODE, GFP_KERNEL,
2047 				  __builtin_return_address(0));
2048 }
2049 
2050 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2051 				const void *caller)
2052 {
2053 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2054 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2055 }
2056 
2057 /**
2058  * find_vm_area - find a continuous kernel virtual area
2059  * @addr:	  base address
2060  *
2061  * Search for the kernel VM area starting at @addr, and return it.
2062  * It is up to the caller to do all required locking to keep the returned
2063  * pointer valid.
2064  *
2065  * Return: pointer to the found area or %NULL on failure
2066  */
2067 struct vm_struct *find_vm_area(const void *addr)
2068 {
2069 	struct vmap_area *va;
2070 
2071 	va = find_vmap_area((unsigned long)addr);
2072 	if (va && va->flags & VM_VM_AREA)
2073 		return va->vm;
2074 
2075 	return NULL;
2076 }
2077 
2078 /**
2079  * remove_vm_area - find and remove a continuous kernel virtual area
2080  * @addr:	    base address
2081  *
2082  * Search for the kernel VM area starting at @addr, and remove it.
2083  * This function returns the found VM area, but using it is NOT safe
2084  * on SMP machines, except for its size or flags.
2085  *
2086  * Return: pointer to the found area or %NULL on failure
2087  */
2088 struct vm_struct *remove_vm_area(const void *addr)
2089 {
2090 	struct vmap_area *va;
2091 
2092 	might_sleep();
2093 
2094 	va = find_vmap_area((unsigned long)addr);
2095 	if (va && va->flags & VM_VM_AREA) {
2096 		struct vm_struct *vm = va->vm;
2097 
2098 		spin_lock(&vmap_area_lock);
2099 		va->vm = NULL;
2100 		va->flags &= ~VM_VM_AREA;
2101 		va->flags |= VM_LAZY_FREE;
2102 		spin_unlock(&vmap_area_lock);
2103 
2104 		kasan_free_shadow(vm);
2105 		free_unmap_vmap_area(va);
2106 
2107 		return vm;
2108 	}
2109 	return NULL;
2110 }
2111 
2112 static inline void set_area_direct_map(const struct vm_struct *area,
2113 				       int (*set_direct_map)(struct page *page))
2114 {
2115 	int i;
2116 
2117 	for (i = 0; i < area->nr_pages; i++)
2118 		if (page_address(area->pages[i]))
2119 			set_direct_map(area->pages[i]);
2120 }
2121 
2122 /* Handle removing and resetting vm mappings related to the vm_struct. */
2123 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2124 {
2125 	unsigned long addr = (unsigned long)area->addr;
2126 	unsigned long start = ULONG_MAX, end = 0;
2127 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2128 	int i;
2129 
2130 	/*
2131 	 * The below block can be removed when all architectures that have
2132 	 * direct map permissions also have set_direct_map_() implementations.
2133 	 * This is concerned with resetting the direct map for any vm alias with
2134 	 * execute permissions, without leaving a RW+X window.
2135 	 */
2136 	if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
2137 		set_memory_nx(addr, area->nr_pages);
2138 		set_memory_rw(addr, area->nr_pages);
2139 	}
2140 
2141 	remove_vm_area(area->addr);
2142 
2143 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2144 	if (!flush_reset)
2145 		return;
2146 
2147 	/*
2148 	 * If not deallocating pages, just do the flush of the VM area and
2149 	 * return.
2150 	 */
2151 	if (!deallocate_pages) {
2152 		vm_unmap_aliases();
2153 		return;
2154 	}
2155 
2156 	/*
2157 	 * If execution gets here, flush the vm mapping and reset the direct
2158 	 * map. Find the start and end range of the direct mappings to make sure
2159 	 * the vm_unmap_aliases() flush includes the direct map.
2160 	 */
2161 	for (i = 0; i < area->nr_pages; i++) {
2162 		if (page_address(area->pages[i])) {
2163 			start = min((unsigned long)page_address(area->pages[i]), start);
2164 			end = max((unsigned long)page_address(area->pages[i]) + PAGE_SIZE, end);
2165 		}
2166 	}
2167 
2168 	/*
2169 	 * Set direct map to something invalid so that it won't be cached if
2170 	 * there are any accesses after the TLB flush, then flush the TLB and
2171 	 * reset the direct map permissions to the default.
2172 	 */
2173 	set_area_direct_map(area, set_direct_map_invalid_noflush);
2174 	_vm_unmap_aliases(start, end, 1);
2175 	set_area_direct_map(area, set_direct_map_default_noflush);
2176 }
2177 
2178 static void __vunmap(const void *addr, int deallocate_pages)
2179 {
2180 	struct vm_struct *area;
2181 
2182 	if (!addr)
2183 		return;
2184 
2185 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2186 			addr))
2187 		return;
2188 
2189 	area = find_vm_area(addr);
2190 	if (unlikely(!area)) {
2191 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2192 				addr);
2193 		return;
2194 	}
2195 
2196 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2197 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2198 
2199 	vm_remove_mappings(area, deallocate_pages);
2200 
2201 	if (deallocate_pages) {
2202 		int i;
2203 
2204 		for (i = 0; i < area->nr_pages; i++) {
2205 			struct page *page = area->pages[i];
2206 
2207 			BUG_ON(!page);
2208 			__free_pages(page, 0);
2209 		}
2210 
2211 		kvfree(area->pages);
2212 	}
2213 
2214 	kfree(area);
2215 	return;
2216 }
2217 
2218 static inline void __vfree_deferred(const void *addr)
2219 {
2220 	/*
2221 	 * Use raw_cpu_ptr() because this can be called from preemptible
2222 	 * context. Preemption is absolutely fine here, because the llist_add()
2223 	 * implementation is lockless, so it works even if we are adding to
2224 	 * another cpu's list.  schedule_work() should be fine with this too.
2225 	 */
2226 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2227 
2228 	if (llist_add((struct llist_node *)addr, &p->list))
2229 		schedule_work(&p->wq);
2230 }
2231 
2232 /**
2233  * vfree_atomic - release memory allocated by vmalloc()
2234  * @addr:	  memory base address
2235  *
2236  * This one is just like vfree() but can be called in any atomic context
2237  * except NMIs.
2238  */
2239 void vfree_atomic(const void *addr)
2240 {
2241 	BUG_ON(in_nmi());
2242 
2243 	kmemleak_free(addr);
2244 
2245 	if (!addr)
2246 		return;
2247 	__vfree_deferred(addr);
2248 }
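/*
 * Illustrative sketch, not part of the original file: vfree_atomic() is the
 * variant to use when the freeing context may not sleep.  The function name
 * is hypothetical.
 */
#if 0
static void example_free_from_softirq(void *buf)
{
	/* The actual unmap is deferred to the vfree_deferred workqueue. */
	vfree_atomic(buf);
}
#endif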
2249 
2250 static void __vfree(const void *addr)
2251 {
2252 	if (unlikely(in_interrupt()))
2253 		__vfree_deferred(addr);
2254 	else
2255 		__vunmap(addr, 1);
2256 }
2257 
2258 /**
2259  * vfree - release memory allocated by vmalloc()
2260  * @addr:  memory base address
2261  *
2262  * Free the virtually contiguous memory area starting at @addr, as
2263  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
2264  * NULL, no operation is performed.
2265  *
2266  * Must not be called in NMI context (strictly speaking, only if we don't
2267  * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2268  * conventions for vfree() arch-dependent would be a really bad idea)
2269  *
2270  * May sleep if called *not* from interrupt context.
2271  *
2272  * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
2273  */
2274 void vfree(const void *addr)
2275 {
2276 	BUG_ON(in_nmi());
2277 
2278 	kmemleak_free(addr);
2279 
2280 	might_sleep_if(!in_interrupt());
2281 
2282 	if (!addr)
2283 		return;
2284 
2285 	__vfree(addr);
2286 }
2287 EXPORT_SYMBOL(vfree);
2288 
2289 /**
2290  * vunmap - release virtual mapping obtained by vmap()
2291  * @addr:   memory base address
2292  *
2293  * Free the virtually contiguous memory area starting at @addr,
2294  * which was created from the page array passed to vmap().
2295  *
2296  * Must not be called in interrupt context.
2297  */
2298 void vunmap(const void *addr)
2299 {
2300 	BUG_ON(in_interrupt());
2301 	might_sleep();
2302 	if (addr)
2303 		__vunmap(addr, 0);
2304 }
2305 EXPORT_SYMBOL(vunmap);
2306 
2307 /**
2308  * vmap - map an array of pages into virtually contiguous space
2309  * @pages: array of page pointers
2310  * @count: number of pages to map
2311  * @flags: vm_area->flags
2312  * @prot: page protection for the mapping
2313  *
2314  * Maps @count pages from @pages into contiguous kernel virtual
2315  * space.
2316  *
2317  * Return: the address of the area or %NULL on failure
2318  */
2319 void *vmap(struct page **pages, unsigned int count,
2320 	   unsigned long flags, pgprot_t prot)
2321 {
2322 	struct vm_struct *area;
2323 	unsigned long size;		/* In bytes */
2324 
2325 	might_sleep();
2326 
2327 	if (count > totalram_pages())
2328 		return NULL;
2329 
2330 	size = (unsigned long)count << PAGE_SHIFT;
2331 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2332 	if (!area)
2333 		return NULL;
2334 
2335 	if (map_vm_area(area, prot, pages)) {
2336 		vunmap(area->addr);
2337 		return NULL;
2338 	}
2339 
2340 	return area->addr;
2341 }
2342 EXPORT_SYMBOL(vmap);
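/*
 * Illustrative sketch, not part of the original file: mapping two physically
 * unrelated pages into one contiguous virtual range with vmap().  The
 * function name is hypothetical; vunmap() does not free the pages, so the
 * caller releases them with __free_page() afterwards.
 */
#if 0
static void *example_vmap_two_pages(struct page *pages[2])
{
	void *vaddr;
	int i;

	for (i = 0; i < 2; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto free;
	}

	vaddr = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		goto free;
	return vaddr;	/* tear down with vunmap() + __free_page() */

free:
	while (i--)
		__free_page(pages[i]);
	return NULL;
}
#endif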
2343 
2344 static void *__vmalloc_node(unsigned long size, unsigned long align,
2345 			    gfp_t gfp_mask, pgprot_t prot,
2346 			    int node, const void *caller);
2347 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2348 				 pgprot_t prot, int node)
2349 {
2350 	struct page **pages;
2351 	unsigned int nr_pages, array_size, i;
2352 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2353 	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
2354 	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
2355 					0 :
2356 					__GFP_HIGHMEM;
2357 
2358 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
2359 	array_size = (nr_pages * sizeof(struct page *));
2360 
2361 	area->nr_pages = nr_pages;
2362 	/* Please note that the recursion is strictly bounded. */
2363 	if (array_size > PAGE_SIZE) {
2364 		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
2365 				PAGE_KERNEL, node, area->caller);
2366 	} else {
2367 		pages = kmalloc_node(array_size, nested_gfp, node);
2368 	}
2369 	area->pages = pages;
2370 	if (!area->pages) {
2371 		remove_vm_area(area->addr);
2372 		kfree(area);
2373 		return NULL;
2374 	}
2375 
2376 	for (i = 0; i < area->nr_pages; i++) {
2377 		struct page *page;
2378 
2379 		if (node == NUMA_NO_NODE)
2380 			page = alloc_page(alloc_mask|highmem_mask);
2381 		else
2382 			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
2383 
2384 		if (unlikely(!page)) {
2385 			/* Successfully allocated i pages, free them in __vunmap() */
2386 			area->nr_pages = i;
2387 			goto fail;
2388 		}
2389 		area->pages[i] = page;
2390 		if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
2391 			cond_resched();
2392 	}
2393 
2394 	if (map_vm_area(area, prot, pages))
2395 		goto fail;
2396 	return area->addr;
2397 
2398 fail:
2399 	warn_alloc(gfp_mask, NULL,
2400 			  "vmalloc: allocation failure, allocated %ld of %ld bytes",
2401 			  (area->nr_pages*PAGE_SIZE), area->size);
2402 	__vfree(area->addr);
2403 	return NULL;
2404 }
2405 
2406 /**
2407  * __vmalloc_node_range - allocate virtually contiguous memory
2408  * @size:		  allocation size
2409  * @align:		  desired alignment
2410  * @start:		  vm area range start
2411  * @end:		  vm area range end
2412  * @gfp_mask:		  flags for the page level allocator
2413  * @prot:		  protection mask for the allocated pages
2414  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
2415  * @node:		  node to use for allocation or NUMA_NO_NODE
2416  * @caller:		  caller's return address
2417  *
2418  * Allocate enough pages to cover @size from the page level
2419  * allocator with @gfp_mask flags.  Map them into contiguous
2420  * kernel virtual space, using a pagetable protection of @prot.
2421  *
2422  * Return: the address of the area or %NULL on failure
2423  */
2424 void *__vmalloc_node_range(unsigned long size, unsigned long align,
2425 			unsigned long start, unsigned long end, gfp_t gfp_mask,
2426 			pgprot_t prot, unsigned long vm_flags, int node,
2427 			const void *caller)
2428 {
2429 	struct vm_struct *area;
2430 	void *addr;
2431 	unsigned long real_size = size;
2432 
2433 	size = PAGE_ALIGN(size);
2434 	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
2435 		goto fail;
2436 
2437 	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
2438 				vm_flags, start, end, node, gfp_mask, caller);
2439 	if (!area)
2440 		goto fail;
2441 
2442 	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
2443 	if (!addr)
2444 		return NULL;
2445 
2446 	/*
2447 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
2448 	 * flag. It means that vm_struct is not fully initialized.
2449 	 * Now, it is fully initialized, so remove this flag here.
2450 	 */
2451 	clear_vm_uninitialized_flag(area);
2452 
2453 	kmemleak_vmalloc(area, size, gfp_mask);
2454 
2455 	return addr;
2456 
2457 fail:
2458 	warn_alloc(gfp_mask, NULL,
2459 			  "vmalloc: allocation failure: %lu bytes", real_size);
2460 	return NULL;
2461 }
2462 
2463 /*
2464  * This is only for performance analysis of vmalloc and stress purposes.
2465  * It is required by the vmalloc test module, therefore do not use it for
2466  * anything else.
2467  */
2468 #ifdef CONFIG_TEST_VMALLOC_MODULE
2469 EXPORT_SYMBOL_GPL(__vmalloc_node_range);
2470 #endif
2471 
2472 /**
2473  * __vmalloc_node - allocate virtually contiguous memory
2474  * @size:	    allocation size
2475  * @align:	    desired alignment
2476  * @gfp_mask:	    flags for the page level allocator
2477  * @prot:	    protection mask for the allocated pages
2478  * @node:	    node to use for allocation or NUMA_NO_NODE
2479  * @caller:	    caller's return address
2480  *
2481  * Allocate enough pages to cover @size from the page level
2482  * allocator with @gfp_mask flags.  Map them into contiguous
2483  * kernel virtual space, using a pagetable protection of @prot.
2484  *
2485  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2486  * and __GFP_NOFAIL are not supported
2487  *
2488  * Any use of gfp flags outside of GFP_KERNEL should be discussed
2489  * with the mm people first.
2490  *
2491  * Return: pointer to the allocated memory or %NULL on error
2492  */
2493 static void *__vmalloc_node(unsigned long size, unsigned long align,
2494 			    gfp_t gfp_mask, pgprot_t prot,
2495 			    int node, const void *caller)
2496 {
2497 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
2498 				gfp_mask, prot, 0, node, caller);
2499 }
2500 
2501 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
2502 {
2503 	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
2504 				__builtin_return_address(0));
2505 }
2506 EXPORT_SYMBOL(__vmalloc);
2507 
2508 static inline void *__vmalloc_node_flags(unsigned long size,
2509 					int node, gfp_t flags)
2510 {
2511 	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
2512 					node, __builtin_return_address(0));
2513 }
2514 
2515 
2516 void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
2517 				  void *caller)
2518 {
2519 	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
2520 }
2521 
2522 /**
2523  * vmalloc - allocate virtually contiguous memory
2524  * @size:    allocation size
2525  *
2526  * Allocate enough pages to cover @size from the page level
2527  * allocator and map them into contiguous kernel virtual space.
2528  *
2529  * For tight control over page level allocator and protection flags
2530  * use __vmalloc() instead.
2531  *
2532  * Return: pointer to the allocated memory or %NULL on error
2533  */
2534 void *vmalloc(unsigned long size)
2535 {
2536 	return __vmalloc_node_flags(size, NUMA_NO_NODE,
2537 				    GFP_KERNEL);
2538 }
2539 EXPORT_SYMBOL(vmalloc);
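/*
 * Illustrative sketch, not part of the original file: the usual
 * vmalloc()/vfree() pairing for an allocation too large for kmalloc().
 * example_alloc_table() and nentries are hypothetical.
 */
#if 0
static unsigned long *example_alloc_table(size_t nentries)
{
	unsigned long *table = vmalloc(array_size(nentries, sizeof(*table)));

	if (!table)
		return NULL;
	memset(table, 0, nentries * sizeof(*table));
	return table;		/* released later with vfree(table) */
}
#endif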
2540 
2541 /**
2542  * vzalloc - allocate virtually contiguous memory with zero fill
2543  * @size:    allocation size
2544  *
2545  * Allocate enough pages to cover @size from the page level
2546  * allocator and map them into contiguous kernel virtual space.
2547  * The memory allocated is set to zero.
2548  *
2549  * For tight control over page level allocator and protection flags
2550  * use __vmalloc() instead.
2551  *
2552  * Return: pointer to the allocated memory or %NULL on error
2553  */
2554 void *vzalloc(unsigned long size)
2555 {
2556 	return __vmalloc_node_flags(size, NUMA_NO_NODE,
2557 				GFP_KERNEL | __GFP_ZERO);
2558 }
2559 EXPORT_SYMBOL(vzalloc);
2560 
2561 /**
2562  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2563  * @size: allocation size
2564  *
2565  * The resulting memory area is zeroed so it can be mapped to userspace
2566  * without leaking data.
2567  *
2568  * Return: pointer to the allocated memory or %NULL on error
2569  */
2570 void *vmalloc_user(unsigned long size)
2571 {
2572 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2573 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
2574 				    VM_USERMAP, NUMA_NO_NODE,
2575 				    __builtin_return_address(0));
2576 }
2577 EXPORT_SYMBOL(vmalloc_user);
2578 
2579 /**
2580  * vmalloc_node - allocate memory on a specific node
2581  * @size:	  allocation size
2582  * @node:	  numa node
2583  *
2584  * Allocate enough pages to cover @size from the page level
2585  * allocator and map them into contiguous kernel virtual space.
2586  *
2587  * For tight control over page level allocator and protection flags
2588  * use __vmalloc() instead.
2589  *
2590  * Return: pointer to the allocated memory or %NULL on error
2591  */
2592 void *vmalloc_node(unsigned long size, int node)
2593 {
2594 	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
2595 					node, __builtin_return_address(0));
2596 }
2597 EXPORT_SYMBOL(vmalloc_node);
2598 
2599 /**
2600  * vzalloc_node - allocate memory on a specific node with zero fill
2601  * @size:	allocation size
2602  * @node:	numa node
2603  *
2604  * Allocate enough pages to cover @size from the page level
2605  * allocator and map them into contiguous kernel virtual space.
2606  * The memory allocated is set to zero.
2607  *
2608  * For tight control over page level allocator and protection flags
2609  * use __vmalloc_node() instead.
2610  *
2611  * Return: pointer to the allocated memory or %NULL on error
2612  */
2613 void *vzalloc_node(unsigned long size, int node)
2614 {
2615 	return __vmalloc_node_flags(size, node,
2616 			 GFP_KERNEL | __GFP_ZERO);
2617 }
2618 EXPORT_SYMBOL(vzalloc_node);
2619 
2620 /**
2621  * vmalloc_exec - allocate virtually contiguous, executable memory
2622  * @size:	  allocation size
2623  *
2624  * Kernel-internal function to allocate enough pages to cover @size
2625  * from the page level allocator and map them into contiguous and
2626  * executable kernel virtual space.
2627  *
2628  * For tight control over page level allocator and protection flags
2629  * use __vmalloc() instead.
2630  *
2631  * Return: pointer to the allocated memory or %NULL on error
2632  */
2633 void *vmalloc_exec(unsigned long size)
2634 {
2635 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2636 			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2637 			NUMA_NO_NODE, __builtin_return_address(0));
2638 }
2639 
2640 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2641 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2642 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2643 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
2644 #else
2645 /*
2646  * 64b systems should always have either DMA or DMA32 zones. For others
2647  * GFP_DMA32 should do the right thing and use the normal zone.
2648  */
2649 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2650 #endif
2651 
2652 /**
2653  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2654  * @size:	allocation size
2655  *
2656  * Allocate enough 32bit PA addressable pages to cover @size from the
2657  * page level allocator and map them into contiguous kernel virtual space.
2658  *
2659  * Return: pointer to the allocated memory or %NULL on error
2660  */
2661 void *vmalloc_32(unsigned long size)
2662 {
2663 	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
2664 			      NUMA_NO_NODE, __builtin_return_address(0));
2665 }
2666 EXPORT_SYMBOL(vmalloc_32);
2667 
2668 /**
2669  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
2670  * @size:	     allocation size
2671  *
2672  * The resulting memory area is 32bit addressable and zeroed so it can be
2673  * mapped to userspace without leaking data.
2674  *
2675  * Return: pointer to the allocated memory or %NULL on error
2676  */
2677 void *vmalloc_32_user(unsigned long size)
2678 {
2679 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2680 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2681 				    VM_USERMAP, NUMA_NO_NODE,
2682 				    __builtin_return_address(0));
2683 }
2684 EXPORT_SYMBOL(vmalloc_32_user);
2685 
2686 /*
2687  * Small helper routine: copy contents to buf from addr.
2688  * If the page is not present, fill with zero.
2689  */
2690 
2691 static int aligned_vread(char *buf, char *addr, unsigned long count)
2692 {
2693 	struct page *p;
2694 	int copied = 0;
2695 
2696 	while (count) {
2697 		unsigned long offset, length;
2698 
2699 		offset = offset_in_page(addr);
2700 		length = PAGE_SIZE - offset;
2701 		if (length > count)
2702 			length = count;
2703 		p = vmalloc_to_page(addr);
2704 		/*
2705 		 * To do safe access to this _mapped_ area, we need a
2706 		 * lock. But adding a lock here means adding overhead to
2707 		 * vmalloc()/vfree() calls for this _debug_ interface,
2708 		 * which is rarely used. Instead, we use kmap() and
2709 		 * accept a small overhead in this access function.
2710 		 */
2711 		if (p) {
2712 			/*
2713 			 * we can expect USER0 is not used (see vread/vwrite's
2714 			 * function description)
2715 			 */
2716 			void *map = kmap_atomic(p);
2717 			memcpy(buf, map + offset, length);
2718 			kunmap_atomic(map);
2719 		} else
2720 			memset(buf, 0, length);
2721 
2722 		addr += length;
2723 		buf += length;
2724 		copied += length;
2725 		count -= length;
2726 	}
2727 	return copied;
2728 }
2729 
2730 static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2731 {
2732 	struct page *p;
2733 	int copied = 0;
2734 
2735 	while (count) {
2736 		unsigned long offset, length;
2737 
2738 		offset = offset_in_page(addr);
2739 		length = PAGE_SIZE - offset;
2740 		if (length > count)
2741 			length = count;
2742 		p = vmalloc_to_page(addr);
2743 		/*
2744 		 * To do safe access to this _mapped_ area, we need a
2745 		 * lock. But adding a lock here means adding overhead to
2746 		 * vmalloc()/vfree() calls for this _debug_ interface,
2747 		 * which is rarely used. Instead, we use kmap() and
2748 		 * accept a small overhead in this access function.
2749 		 */
2750 		if (p) {
2751 			/*
2752 			 * we can expect USER0 is not used (see vread/vwrite's
2753 			 * function description)
2754 			 */
2755 			void *map = kmap_atomic(p);
2756 			memcpy(map + offset, buf, length);
2757 			kunmap_atomic(map);
2758 		}
2759 		addr += length;
2760 		buf += length;
2761 		copied += length;
2762 		count -= length;
2763 	}
2764 	return copied;
2765 }
2766 
2767 /**
2768  * vread() - read vmalloc area in a safe way.
2769  * @buf:     buffer for reading data
2770  * @addr:    vm address.
2771  * @count:   number of bytes to be read.
2772  *
2773  * This function checks that addr is a valid vmalloc'ed area, and
2774  * copies data from that area to a given buffer. If the given memory range
2775  * of [addr...addr+count) includes some valid address, data is copied to
2776  * the proper area of @buf. If there are memory holes, they'll be zero-filled.
2777  * An IOREMAP area is treated as a memory hole and no copy is done.
2778  *
2779  * If [addr...addr+count) doesn't include any intersection with a live
2780  * vm_struct area, returns 0. @buf should be a kernel buffer.
2781  *
2782  * Note: In usual ops, vread() is never necessary because the caller
2783  * should know the vmalloc() area is valid and can use memcpy().
2784  * This is for routines which have to access the vmalloc area without
2785  * any information, such as /dev/kmem.
2786  *
2787  * Return: number of bytes for which addr and buf should be increased
2788  * (same number as @count) or %0 if [addr...addr+count) doesn't
2789  * include any intersection with valid vmalloc area
2790  */
2791 long vread(char *buf, char *addr, unsigned long count)
2792 {
2793 	struct vmap_area *va;
2794 	struct vm_struct *vm;
2795 	char *vaddr, *buf_start = buf;
2796 	unsigned long buflen = count;
2797 	unsigned long n;
2798 
2799 	/* Don't allow overflow */
2800 	if ((unsigned long) addr + count < count)
2801 		count = -(unsigned long) addr;
2802 
2803 	spin_lock(&vmap_area_lock);
2804 	list_for_each_entry(va, &vmap_area_list, list) {
2805 		if (!count)
2806 			break;
2807 
2808 		if (!(va->flags & VM_VM_AREA))
2809 			continue;
2810 
2811 		vm = va->vm;
2812 		vaddr = (char *) vm->addr;
2813 		if (addr >= vaddr + get_vm_area_size(vm))
2814 			continue;
2815 		while (addr < vaddr) {
2816 			if (count == 0)
2817 				goto finished;
2818 			*buf = '\0';
2819 			buf++;
2820 			addr++;
2821 			count--;
2822 		}
2823 		n = vaddr + get_vm_area_size(vm) - addr;
2824 		if (n > count)
2825 			n = count;
2826 		if (!(vm->flags & VM_IOREMAP))
2827 			aligned_vread(buf, addr, n);
2828 		else /* IOREMAP area is treated as memory hole */
2829 			memset(buf, 0, n);
2830 		buf += n;
2831 		addr += n;
2832 		count -= n;
2833 	}
2834 finished:
2835 	spin_unlock(&vmap_area_lock);
2836 
2837 	if (buf == buf_start)
2838 		return 0;
2839 	/* zero-fill memory holes */
2840 	if (buf != buf_start + buflen)
2841 		memset(buf, 0, buflen - (buf - buf_start));
2842 
2843 	return buflen;
2844 }
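/*
 * Illustrative sketch, not part of the original file: reading a vmalloc
 * range into a kernel buffer with vread().  The function name and its
 * parameters are hypothetical.
 */
#if 0
static long example_dump_vmalloc(char *dst, char *vmalloc_addr,
				 unsigned long len)
{
	long copied = vread(dst, vmalloc_addr, len);

	if (!copied)	/* no intersection with a live vmalloc area */
		return -EINVAL;
	return copied;	/* holes inside the range come back zero-filled */
}
#endif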
2845 
2846 /**
2847  * vwrite() - write vmalloc area in a safe way.
2848  * @buf:      buffer for source data
2849  * @addr:     vm address.
2850  * @count:    number of bytes to be written.
2851  *
2852  * This function checks that addr is a valid vmalloc'ed area, and
2853  * copies data from a buffer to the given addr. If the specified range of
2854  * [addr...addr+count) includes some valid address, data is copied from
2855  * the proper area of @buf. If there are memory holes, no copy is done to them.
2856  * An IOREMAP area is treated as a memory hole and no copy is done.
2857  *
2858  * If [addr...addr+count) doesn't include any intersection with a live
2859  * vm_struct area, returns 0. @buf should be a kernel buffer.
2860  *
2861  * Note: In usual ops, vwrite() is never necessary because the caller
2862  * should know the vmalloc() area is valid and can use memcpy().
2863  * This is for routines which have to access the vmalloc area without
2864  * any information, such as /dev/kmem.
2865  *
2866  * Return: number of bytes for which addr and buf should be
2867  * increased (same number as @count) or %0 if [addr...addr+count)
2868  * doesn't include any intersection with valid vmalloc area
2869  */
2870 long vwrite(char *buf, char *addr, unsigned long count)
2871 {
2872 	struct vmap_area *va;
2873 	struct vm_struct *vm;
2874 	char *vaddr;
2875 	unsigned long n, buflen;
2876 	int copied = 0;
2877 
2878 	/* Don't allow overflow */
2879 	if ((unsigned long) addr + count < count)
2880 		count = -(unsigned long) addr;
2881 	buflen = count;
2882 
2883 	spin_lock(&vmap_area_lock);
2884 	list_for_each_entry(va, &vmap_area_list, list) {
2885 		if (!count)
2886 			break;
2887 
2888 		if (!(va->flags & VM_VM_AREA))
2889 			continue;
2890 
2891 		vm = va->vm;
2892 		vaddr = (char *) vm->addr;
2893 		if (addr >= vaddr + get_vm_area_size(vm))
2894 			continue;
2895 		while (addr < vaddr) {
2896 			if (count == 0)
2897 				goto finished;
2898 			buf++;
2899 			addr++;
2900 			count--;
2901 		}
2902 		n = vaddr + get_vm_area_size(vm) - addr;
2903 		if (n > count)
2904 			n = count;
2905 		if (!(vm->flags & VM_IOREMAP)) {
2906 			aligned_vwrite(buf, addr, n);
2907 			copied++;
2908 		}
2909 		buf += n;
2910 		addr += n;
2911 		count -= n;
2912 	}
2913 finished:
2914 	spin_unlock(&vmap_area_lock);
2915 	if (!copied)
2916 		return 0;
2917 	return buflen;
2918 }
2919 
2920 /**
2921  * remap_vmalloc_range_partial - map vmalloc pages to userspace
2922  * @vma:		vma to cover
2923  * @uaddr:		target user address to start at
2924  * @kaddr:		virtual address of vmalloc kernel memory
2925  * @size:		size of map area
2926  *
2927  * Returns:	0 for success, -Exxx on failure
2928  *
2929  * This function checks that @kaddr is a valid vmalloc'ed area,
2930  * and that it is big enough to cover the range starting at
2931  * @uaddr in @vma. Will return failure if those criteria aren't
2932  * met.
2933  *
2934  * Similar to remap_pfn_range() (see mm/memory.c)
2935  */
2936 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2937 				void *kaddr, unsigned long size)
2938 {
2939 	struct vm_struct *area;
2940 
2941 	size = PAGE_ALIGN(size);
2942 
2943 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2944 		return -EINVAL;
2945 
2946 	area = find_vm_area(kaddr);
2947 	if (!area)
2948 		return -EINVAL;
2949 
2950 	if (!(area->flags & VM_USERMAP))
2951 		return -EINVAL;
2952 
2953 	if (kaddr + size > area->addr + get_vm_area_size(area))
2954 		return -EINVAL;
2955 
2956 	do {
2957 		struct page *page = vmalloc_to_page(kaddr);
2958 		int ret;
2959 
2960 		ret = vm_insert_page(vma, uaddr, page);
2961 		if (ret)
2962 			return ret;
2963 
2964 		uaddr += PAGE_SIZE;
2965 		kaddr += PAGE_SIZE;
2966 		size -= PAGE_SIZE;
2967 	} while (size > 0);
2968 
2969 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2970 
2971 	return 0;
2972 }
2973 EXPORT_SYMBOL(remap_vmalloc_range_partial);
2974 
2975 /**
2976  * remap_vmalloc_range - map vmalloc pages to userspace
2977  * @vma:		vma to cover (map full range of vma)
2978  * @addr:		vmalloc memory
2979  * @pgoff:		number of pages into addr before first page to map
2980  *
2981  * Returns:	0 for success, -Exxx on failure
2982  *
2983  * This function checks that addr is a valid vmalloc'ed area, and
2984  * that it is big enough to cover the vma. Will return failure if
2985  * those criteria aren't met.
2986  *
2987  * Similar to remap_pfn_range() (see mm/memory.c)
2988  */
2989 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2990 						unsigned long pgoff)
2991 {
2992 	return remap_vmalloc_range_partial(vma, vma->vm_start,
2993 					   addr + (pgoff << PAGE_SHIFT),
2994 					   vma->vm_end - vma->vm_start);
2995 }
2996 EXPORT_SYMBOL(remap_vmalloc_range);
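/*
 * Illustrative sketch, not part of the original file: a driver mmap()
 * handler exposing a buffer to userspace.  The buffer must have been
 * allocated with vmalloc_user() (or vmalloc_32_user()) so that its area
 * carries VM_USERMAP.  example_buf and example_mmap() are hypothetical.
 */
#if 0
static void *example_buf;	/* allocated elsewhere with vmalloc_user() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}
#endif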
2997 
2998 /*
2999  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
3000  * have one.
3001  */
3002 void __weak vmalloc_sync_all(void)
3003 {
3004 }
3005 
3006 
3007 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
3008 {
3009 	pte_t ***p = data;
3010 
3011 	if (p) {
3012 		*(*p) = pte;
3013 		(*p)++;
3014 	}
3015 	return 0;
3016 }
3017 
3018 /**
3019  * alloc_vm_area - allocate a range of kernel address space
3020  * @size:	   size of the area
3021  * @ptes:	   returns the PTEs for the address space
3022  *
3023  * Returns:	NULL on failure, vm_struct on success
3024  *
3025  * This function reserves a range of kernel address space, and
3026  * allocates pagetables to map that range.  No actual mappings
3027  * are created.
3028  *
3029  * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
3030  * allocated for the VM area are returned.
3031  */
3032 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
3033 {
3034 	struct vm_struct *area;
3035 
3036 	area = get_vm_area_caller(size, VM_IOREMAP,
3037 				__builtin_return_address(0));
3038 	if (area == NULL)
3039 		return NULL;
3040 
3041 	/*
3042 	 * This ensures that page tables are constructed for this region
3043 	 * of kernel virtual address space and mapped into init_mm.
3044 	 */
3045 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3046 				size, f, ptes ? &ptes : NULL)) {
3047 		free_vm_area(area);
3048 		return NULL;
3049 	}
3050 
3051 	return area;
3052 }
3053 EXPORT_SYMBOL_GPL(alloc_vm_area);
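/*
 * Illustrative sketch, not part of the original file: reserving address
 * space with alloc_vm_area() and collecting the PTEs so a user such as a
 * hypervisor backend can install the mappings itself.  The function name
 * and the ptes array handling are hypothetical.
 */
#if 0
static struct vm_struct *example_reserve(size_t bytes, pte_t **ptes)
{
	/* ptes must have room for one entry per page of @bytes. */
	struct vm_struct *area = alloc_vm_area(bytes, ptes);

	if (!area)
		return NULL;

	/*
	 * Page tables now exist for [area->addr, area->addr + bytes) but no
	 * mappings do; the caller fills the returned PTEs directly.
	 * Tear down with free_vm_area(area).
	 */
	return area;
}
#endif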
3054 
3055 void free_vm_area(struct vm_struct *area)
3056 {
3057 	struct vm_struct *ret;
3058 	ret = remove_vm_area(area->addr);
3059 	BUG_ON(ret != area);
3060 	kfree(area);
3061 }
3062 EXPORT_SYMBOL_GPL(free_vm_area);
3063 
3064 #ifdef CONFIG_SMP
3065 static struct vmap_area *node_to_va(struct rb_node *n)
3066 {
3067 	return rb_entry_safe(n, struct vmap_area, rb_node);
3068 }
3069 
3070 /**
3071  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3072  * @addr: target address
3073  *
3074  * Returns: the vmap_area if it is found. If there is no such area,
3075  *   the highest vmap_area below @addr (reverse order) is returned,
3076  *   i.e. va->va_start < addr && va->va_end < addr, or NULL
3077  *   if there are no areas before @addr.
3078  */
3079 static struct vmap_area *
3080 pvm_find_va_enclose_addr(unsigned long addr)
3081 {
3082 	struct vmap_area *va, *tmp;
3083 	struct rb_node *n;
3084 
3085 	n = free_vmap_area_root.rb_node;
3086 	va = NULL;
3087 
3088 	while (n) {
3089 		tmp = rb_entry(n, struct vmap_area, rb_node);
3090 		if (tmp->va_start <= addr) {
3091 			va = tmp;
3092 			if (tmp->va_end >= addr)
3093 				break;
3094 
3095 			n = n->rb_right;
3096 		} else {
3097 			n = n->rb_left;
3098 		}
3099 	}
3100 
3101 	return va;
3102 }
3103 
3104 /**
3105  * pvm_determine_end_from_reverse - find the highest aligned address
3106  * of free block below VMALLOC_END
3107  * @va: in - the VA we start the search from (reverse order);
3108  *      out - the VA with the highest aligned end address.
3109  * @align: alignment the end address must satisfy
3110  *
3111  * Returns: determined end address within vmap_area
3112  */
3113 static unsigned long
3114 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3115 {
3116 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3117 	unsigned long addr;
3118 
3119 	if (likely(*va)) {
3120 		list_for_each_entry_from_reverse((*va),
3121 				&free_vmap_area_list, list) {
3122 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3123 			if ((*va)->va_start < addr)
3124 				return addr;
3125 		}
3126 	}
3127 
3128 	return 0;
3129 }
3130 
3131 /**
3132  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3133  * @offsets: array containing offset of each area
3134  * @sizes: array containing size of each area
3135  * @nr_vms: the number of areas to allocate
3136  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3137  *
3138  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3139  *	    vm_structs on success, %NULL on failure
3140  *
3141  * Percpu allocator wants to use congruent vm areas so that it can
3142  * maintain the offsets among percpu areas.  This function allocates
3143  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3144  * be scattered pretty far, distance between two areas easily going up
3145  * to gigabytes.  To avoid interacting with regular vmallocs, these
3146  * areas are allocated from top.
3147  *
3148  * Despite its complicated look, this allocator is rather simple. It
3149  * does everything top-down and scans free blocks from the end looking
3150  * for a matching base. While scanning, if any of the areas do not fit,
3151  * the base address is pulled down to fit the area. Scanning is repeated till
3152  * all the areas fit and then all necessary data structures are inserted
3153  * and the result is returned.
3154  */
3155 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3156 				     const size_t *sizes, int nr_vms,
3157 				     size_t align)
3158 {
3159 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3160 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3161 	struct vmap_area **vas, *va;
3162 	struct vm_struct **vms;
3163 	int area, area2, last_area, term_area;
3164 	unsigned long base, start, size, end, last_end;
3165 	bool purged = false;
3166 	enum fit_type type;
3167 
3168 	/* verify parameters and allocate data structures */
3169 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3170 	for (last_area = 0, area = 0; area < nr_vms; area++) {
3171 		start = offsets[area];
3172 		end = start + sizes[area];
3173 
3174 		/* is everything aligned properly? */
3175 		BUG_ON(!IS_ALIGNED(offsets[area], align));
3176 		BUG_ON(!IS_ALIGNED(sizes[area], align));
3177 
3178 		/* detect the area with the highest address */
3179 		if (start > offsets[last_area])
3180 			last_area = area;
3181 
3182 		for (area2 = area + 1; area2 < nr_vms; area2++) {
3183 			unsigned long start2 = offsets[area2];
3184 			unsigned long end2 = start2 + sizes[area2];
3185 
3186 			BUG_ON(start2 < end && start < end2);
3187 		}
3188 	}
3189 	last_end = offsets[last_area] + sizes[last_area];
3190 
3191 	if (vmalloc_end - vmalloc_start < last_end) {
3192 		WARN_ON(true);
3193 		return NULL;
3194 	}
3195 
3196 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3197 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3198 	if (!vas || !vms)
3199 		goto err_free2;
3200 
3201 	for (area = 0; area < nr_vms; area++) {
3202 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3203 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3204 		if (!vas[area] || !vms[area])
3205 			goto err_free;
3206 	}
3207 retry:
3208 	spin_lock(&vmap_area_lock);
3209 
3210 	/* start scanning - we scan from the top, begin with the last area */
3211 	area = term_area = last_area;
3212 	start = offsets[area];
3213 	end = start + sizes[area];
3214 
3215 	va = pvm_find_va_enclose_addr(vmalloc_end);
3216 	base = pvm_determine_end_from_reverse(&va, align) - end;
3217 
3218 	while (true) {
3219 		/*
3220 		 * base might have underflowed, add last_end before
3221 		 * comparing.
3222 		 */
3223 		if (base + last_end < vmalloc_start + last_end)
3224 			goto overflow;
3225 
3226 		/*
3227 		 * Fitting base has not been found.
3228 		 */
3229 		if (va == NULL)
3230 			goto overflow;
3231 
3232 		/*
3233 		 * If this VA does not fit, move base downwards and recheck.
3234 		 */
3235 		if (base + start < va->va_start || base + end > va->va_end) {
3236 			va = node_to_va(rb_prev(&va->rb_node));
3237 			base = pvm_determine_end_from_reverse(&va, align) - end;
3238 			term_area = area;
3239 			continue;
3240 		}
3241 
3242 		/*
3243 		 * This area fits, move on to the previous one.  If
3244 		 * the previous one is the terminal one, we're done.
3245 		 */
3246 		area = (area + nr_vms - 1) % nr_vms;
3247 		if (area == term_area)
3248 			break;
3249 
3250 		start = offsets[area];
3251 		end = start + sizes[area];
3252 		va = pvm_find_va_enclose_addr(base + end);
3253 	}
3254 
3255 	/* we've found a fitting base, insert all va's */
3256 	for (area = 0; area < nr_vms; area++) {
3257 		int ret;
3258 
3259 		start = base + offsets[area];
3260 		size = sizes[area];
3261 
3262 		va = pvm_find_va_enclose_addr(start);
3263 		if (WARN_ON_ONCE(va == NULL))
3264 			/* It is a BUG(), but trigger recovery instead. */
3265 			goto recovery;
3266 
3267 		type = classify_va_fit_type(va, start, size);
3268 		if (WARN_ON_ONCE(type == NOTHING_FIT))
3269 			/* It is a BUG(), but trigger recovery instead. */
3270 			goto recovery;
3271 
3272 		ret = adjust_va_to_fit_type(va, start, size, type);
3273 		if (unlikely(ret))
3274 			goto recovery;
3275 
3276 		/* Allocated area. */
3277 		va = vas[area];
3278 		va->va_start = start;
3279 		va->va_end = start + size;
3280 
3281 		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
3282 	}
3283 
3284 	spin_unlock(&vmap_area_lock);
3285 
3286 	/* insert all vm's */
3287 	for (area = 0; area < nr_vms; area++)
3288 		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
3289 				 pcpu_get_vm_areas);
3290 
3291 	kfree(vas);
3292 	return vms;
3293 
3294 recovery:
3295 	/* Remove previously inserted areas. */
3296 	while (area--) {
3297 		__free_vmap_area(vas[area]);
3298 		vas[area] = NULL;
3299 	}
3300 
3301 overflow:
3302 	spin_unlock(&vmap_area_lock);
3303 	if (!purged) {
3304 		purge_vmap_area_lazy();
3305 		purged = true;
3306 
3307 		/* Before "retry", check if we recover. */
3308 		for (area = 0; area < nr_vms; area++) {
3309 			if (vas[area])
3310 				continue;
3311 
3312 			vas[area] = kmem_cache_zalloc(
3313 				vmap_area_cachep, GFP_KERNEL);
3314 			if (!vas[area])
3315 				goto err_free;
3316 		}
3317 
3318 		goto retry;
3319 	}
3320 
3321 err_free:
3322 	for (area = 0; area < nr_vms; area++) {
3323 		if (vas[area])
3324 			kmem_cache_free(vmap_area_cachep, vas[area]);
3325 
3326 		kfree(vms[area]);
3327 	}
3328 err_free2:
3329 	kfree(vas);
3330 	kfree(vms);
3331 	return NULL;
3332 }
3333 
3334 /**
3335  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3336  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3337  * @nr_vms: the number of allocated areas
3338  *
3339  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3340  */
3341 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3342 {
3343 	int i;
3344 
3345 	for (i = 0; i < nr_vms; i++)
3346 		free_vm_area(vms[i]);
3347 	kfree(vms);
3348 }
3349 #endif	/* CONFIG_SMP */
3350 
3351 #ifdef CONFIG_PROC_FS
3352 static void *s_start(struct seq_file *m, loff_t *pos)
3353 	__acquires(&vmap_area_lock)
3354 {
3355 	spin_lock(&vmap_area_lock);
3356 	return seq_list_start(&vmap_area_list, *pos);
3357 }
3358 
3359 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3360 {
3361 	return seq_list_next(p, &vmap_area_list, pos);
3362 }
3363 
3364 static void s_stop(struct seq_file *m, void *p)
3365 	__releases(&vmap_area_lock)
3366 {
3367 	spin_unlock(&vmap_area_lock);
3368 }
3369 
3370 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3371 {
3372 	if (IS_ENABLED(CONFIG_NUMA)) {
3373 		unsigned int nr, *counters = m->private;
3374 
3375 		if (!counters)
3376 			return;
3377 
3378 		if (v->flags & VM_UNINITIALIZED)
3379 			return;
3380 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3381 		smp_rmb();
3382 
3383 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3384 
3385 		for (nr = 0; nr < v->nr_pages; nr++)
3386 			counters[page_to_nid(v->pages[nr])]++;
3387 
3388 		for_each_node_state(nr, N_HIGH_MEMORY)
3389 			if (counters[nr])
3390 				seq_printf(m, " N%u=%u", nr, counters[nr]);
3391 	}
3392 }
3393 
3394 static int s_show(struct seq_file *m, void *p)
3395 {
3396 	struct vmap_area *va;
3397 	struct vm_struct *v;
3398 
3399 	va = list_entry(p, struct vmap_area, list);
3400 
3401 	/*
3402 	 * s_show can race with remove_vm_area(): !VM_VM_AREA means the vmap
3403 	 * area is being torn down or belongs to a vm_map_ram allocation.
3404 	 */
3405 	if (!(va->flags & VM_VM_AREA)) {
3406 		seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
3407 			(void *)va->va_start, (void *)va->va_end,
3408 			va->va_end - va->va_start,
3409 			va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
3410 
3411 		return 0;
3412 	}
3413 
3414 	v = va->vm;
3415 
3416 	seq_printf(m, "0x%pK-0x%pK %7ld",
3417 		v->addr, v->addr + v->size, v->size);
3418 
3419 	if (v->caller)
3420 		seq_printf(m, " %pS", v->caller);
3421 
3422 	if (v->nr_pages)
3423 		seq_printf(m, " pages=%d", v->nr_pages);
3424 
3425 	if (v->phys_addr)
3426 		seq_printf(m, " phys=%pa", &v->phys_addr);
3427 
3428 	if (v->flags & VM_IOREMAP)
3429 		seq_puts(m, " ioremap");
3430 
3431 	if (v->flags & VM_ALLOC)
3432 		seq_puts(m, " vmalloc");
3433 
3434 	if (v->flags & VM_MAP)
3435 		seq_puts(m, " vmap");
3436 
3437 	if (v->flags & VM_USERMAP)
3438 		seq_puts(m, " user");
3439 
3440 	if (is_vmalloc_addr(v->pages))
3441 		seq_puts(m, " vpages");
3442 
3443 	show_numa_info(m, v);
3444 	seq_putc(m, '\n');
3445 	return 0;
3446 }
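/*
 * For reference, a /proc/vmallocinfo line produced by the seq_printf()
 * calls above has the shape below.  The addresses, size and caller are
 * made-up placeholders; only the layout follows from the format strings.
 *
 *   0xffffc90000001000-0xffffc90000006000   20480 example_caller+0x10/0x20 pages=4 vmalloc N0=4
 */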
3447 
3448 static const struct seq_operations vmalloc_op = {
3449 	.start = s_start,
3450 	.next = s_next,
3451 	.stop = s_stop,
3452 	.show = s_show,
3453 };
3454 
3455 static int __init proc_vmalloc_init(void)
3456 {
3457 	if (IS_ENABLED(CONFIG_NUMA))
3458 		proc_create_seq_private("vmallocinfo", 0400, NULL,
3459 				&vmalloc_op,
3460 				nr_node_ids * sizeof(unsigned int), NULL);
3461 	else
3462 		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3463 	return 0;
3464 }
3465 module_init(proc_vmalloc_init);
3466 
3467 #endif
3468