xref: /openbmc/linux/mm/vmalloc.c (revision 2c64e9cb)
1 /*
2  *  linux/mm/vmalloc.c
3  *
4  *  Copyright (C) 1993  Linus Torvalds
5  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
6  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
7  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
8  *  Numa awareness, Christoph Lameter, SGI, June 2005
9  */
10 
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/sched/signal.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/interrupt.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/set_memory.h>
22 #include <linux/debugobjects.h>
23 #include <linux/kallsyms.h>
24 #include <linux/list.h>
25 #include <linux/notifier.h>
26 #include <linux/rbtree.h>
27 #include <linux/radix-tree.h>
28 #include <linux/rcupdate.h>
29 #include <linux/pfn.h>
30 #include <linux/kmemleak.h>
31 #include <linux/atomic.h>
32 #include <linux/compiler.h>
33 #include <linux/llist.h>
34 #include <linux/bitops.h>
35 
36 #include <linux/uaccess.h>
37 #include <asm/tlbflush.h>
38 #include <asm/shmparam.h>
39 
40 #include "internal.h"
41 
42 struct vfree_deferred {
43 	struct llist_head list;
44 	struct work_struct wq;
45 };
46 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
47 
48 static void __vunmap(const void *, int);
49 
50 static void free_work(struct work_struct *w)
51 {
52 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
53 	struct llist_node *t, *llnode;
54 
55 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
56 		__vunmap((void *)llnode, 1);
57 }
58 
59 /*** Page table manipulation functions ***/
60 
61 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
62 {
63 	pte_t *pte;
64 
65 	pte = pte_offset_kernel(pmd, addr);
66 	do {
67 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
68 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
69 	} while (pte++, addr += PAGE_SIZE, addr != end);
70 }
71 
72 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
73 {
74 	pmd_t *pmd;
75 	unsigned long next;
76 
77 	pmd = pmd_offset(pud, addr);
78 	do {
79 		next = pmd_addr_end(addr, end);
80 		if (pmd_clear_huge(pmd))
81 			continue;
82 		if (pmd_none_or_clear_bad(pmd))
83 			continue;
84 		vunmap_pte_range(pmd, addr, next);
85 	} while (pmd++, addr = next, addr != end);
86 }
87 
88 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
89 {
90 	pud_t *pud;
91 	unsigned long next;
92 
93 	pud = pud_offset(p4d, addr);
94 	do {
95 		next = pud_addr_end(addr, end);
96 		if (pud_clear_huge(pud))
97 			continue;
98 		if (pud_none_or_clear_bad(pud))
99 			continue;
100 		vunmap_pmd_range(pud, addr, next);
101 	} while (pud++, addr = next, addr != end);
102 }
103 
104 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
105 {
106 	p4d_t *p4d;
107 	unsigned long next;
108 
109 	p4d = p4d_offset(pgd, addr);
110 	do {
111 		next = p4d_addr_end(addr, end);
112 		if (p4d_clear_huge(p4d))
113 			continue;
114 		if (p4d_none_or_clear_bad(p4d))
115 			continue;
116 		vunmap_pud_range(p4d, addr, next);
117 	} while (p4d++, addr = next, addr != end);
118 }
119 
120 static void vunmap_page_range(unsigned long addr, unsigned long end)
121 {
122 	pgd_t *pgd;
123 	unsigned long next;
124 
125 	BUG_ON(addr >= end);
126 	pgd = pgd_offset_k(addr);
127 	do {
128 		next = pgd_addr_end(addr, end);
129 		if (pgd_none_or_clear_bad(pgd))
130 			continue;
131 		vunmap_p4d_range(pgd, addr, next);
132 	} while (pgd++, addr = next, addr != end);
133 }
134 
135 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
136 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
137 {
138 	pte_t *pte;
139 
140 	/*
141 	 * nr is a running index into the array which helps higher level
142 	 * callers keep track of where we're up to.
143 	 */
144 
145 	pte = pte_alloc_kernel(pmd, addr);
146 	if (!pte)
147 		return -ENOMEM;
148 	do {
149 		struct page *page = pages[*nr];
150 
151 		if (WARN_ON(!pte_none(*pte)))
152 			return -EBUSY;
153 		if (WARN_ON(!page))
154 			return -ENOMEM;
155 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
156 		(*nr)++;
157 	} while (pte++, addr += PAGE_SIZE, addr != end);
158 	return 0;
159 }
160 
161 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
162 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
163 {
164 	pmd_t *pmd;
165 	unsigned long next;
166 
167 	pmd = pmd_alloc(&init_mm, pud, addr);
168 	if (!pmd)
169 		return -ENOMEM;
170 	do {
171 		next = pmd_addr_end(addr, end);
172 		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
173 			return -ENOMEM;
174 	} while (pmd++, addr = next, addr != end);
175 	return 0;
176 }
177 
178 static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
179 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
180 {
181 	pud_t *pud;
182 	unsigned long next;
183 
184 	pud = pud_alloc(&init_mm, p4d, addr);
185 	if (!pud)
186 		return -ENOMEM;
187 	do {
188 		next = pud_addr_end(addr, end);
189 		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
190 			return -ENOMEM;
191 	} while (pud++, addr = next, addr != end);
192 	return 0;
193 }
194 
195 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
196 		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
197 {
198 	p4d_t *p4d;
199 	unsigned long next;
200 
201 	p4d = p4d_alloc(&init_mm, pgd, addr);
202 	if (!p4d)
203 		return -ENOMEM;
204 	do {
205 		next = p4d_addr_end(addr, end);
206 		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
207 			return -ENOMEM;
208 	} while (p4d++, addr = next, addr != end);
209 	return 0;
210 }
211 
212 /*
213  * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
214  * will have pfns corresponding to the "pages" array.
215  *
216  * I.e. the pte at addr + N*PAGE_SIZE shall point to the pfn corresponding to pages[N]
217  */
218 static int vmap_page_range_noflush(unsigned long start, unsigned long end,
219 				   pgprot_t prot, struct page **pages)
220 {
221 	pgd_t *pgd;
222 	unsigned long next;
223 	unsigned long addr = start;
224 	int err = 0;
225 	int nr = 0;
226 
227 	BUG_ON(addr >= end);
228 	pgd = pgd_offset_k(addr);
229 	do {
230 		next = pgd_addr_end(addr, end);
231 		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
232 		if (err)
233 			return err;
234 	} while (pgd++, addr = next, addr != end);
235 
236 	return nr;
237 }
238 
239 static int vmap_page_range(unsigned long start, unsigned long end,
240 			   pgprot_t prot, struct page **pages)
241 {
242 	int ret;
243 
244 	ret = vmap_page_range_noflush(start, end, prot, pages);
245 	flush_cache_vmap(start, end);
246 	return ret;
247 }
248 
249 int is_vmalloc_or_module_addr(const void *x)
250 {
251 	/*
252 	 * ARM, x86-64 and sparc64 put modules in a special place,
253 	 * and fall back on vmalloc() if that fails. Others
254 	 * just put it in the vmalloc space.
255 	 */
256 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
257 	unsigned long addr = (unsigned long)x;
258 	if (addr >= MODULES_VADDR && addr < MODULES_END)
259 		return 1;
260 #endif
261 	return is_vmalloc_addr(x);
262 }
263 
264 /*
265  * Walk a vmap address to the struct page it maps.
266  */
267 struct page *vmalloc_to_page(const void *vmalloc_addr)
268 {
269 	unsigned long addr = (unsigned long) vmalloc_addr;
270 	struct page *page = NULL;
271 	pgd_t *pgd = pgd_offset_k(addr);
272 	p4d_t *p4d;
273 	pud_t *pud;
274 	pmd_t *pmd;
275 	pte_t *ptep, pte;
276 
277 	/*
278 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
279 	 * architectures that do not vmalloc module space
280 	 */
281 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
282 
283 	if (pgd_none(*pgd))
284 		return NULL;
285 	p4d = p4d_offset(pgd, addr);
286 	if (p4d_none(*p4d))
287 		return NULL;
288 	pud = pud_offset(p4d, addr);
289 
290 	/*
291 	 * Don't dereference bad PUD or PMD (below) entries. This will also
292 	 * identify huge mappings, which we may encounter on architectures
293 	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
294 	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
295 	 * not [unambiguously] associated with a struct page, so there is
296 	 * no correct value to return for them.
297 	 */
298 	WARN_ON_ONCE(pud_bad(*pud));
299 	if (pud_none(*pud) || pud_bad(*pud))
300 		return NULL;
301 	pmd = pmd_offset(pud, addr);
302 	WARN_ON_ONCE(pmd_bad(*pmd));
303 	if (pmd_none(*pmd) || pmd_bad(*pmd))
304 		return NULL;
305 
306 	ptep = pte_offset_map(pmd, addr);
307 	pte = *ptep;
308 	if (pte_present(pte))
309 		page = pte_page(pte);
310 	pte_unmap(ptep);
311 	return page;
312 }
313 EXPORT_SYMBOL(vmalloc_to_page);
314 
315 /*
316  * Map a vmalloc()-space virtual address to the physical page frame number.
317  */
318 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
319 {
320 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
321 }
322 EXPORT_SYMBOL(vmalloc_to_pfn);
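
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * walk a vmalloc()'d buffer page by page and collect the backing struct
 * pages, e.g. to build a scatterlist from them later. The function name and
 * the kcalloc()-backed array are hypothetical; vmalloc_to_page() and
 * is_vmalloc_addr() are the real APIs being demonstrated.
 */
static __maybe_unused struct page **example_collect_vmalloc_pages(void *buf,
								  unsigned long size)
{
	unsigned long i, nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;

	if (!is_vmalloc_addr(buf))
		return NULL;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < nr; i++)
		pages[i] = vmalloc_to_page(buf + i * PAGE_SIZE);

	return pages;
}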
323 
324 
325 /*** Global kva allocator ***/
326 
327 #define VM_LAZY_FREE	0x02
328 #define VM_VM_AREA	0x04
329 
330 static DEFINE_SPINLOCK(vmap_area_lock);
331 /* Export for kexec only */
332 LIST_HEAD(vmap_area_list);
333 static LLIST_HEAD(vmap_purge_list);
334 static struct rb_root vmap_area_root = RB_ROOT;
335 
336 /* The vmap cache globals are protected by vmap_area_lock */
337 static struct rb_node *free_vmap_cache;
338 static unsigned long cached_hole_size;
339 static unsigned long cached_vstart;
340 static unsigned long cached_align;
341 
342 static unsigned long vmap_area_pcpu_hole;
343 
344 static struct vmap_area *__find_vmap_area(unsigned long addr)
345 {
346 	struct rb_node *n = vmap_area_root.rb_node;
347 
348 	while (n) {
349 		struct vmap_area *va;
350 
351 		va = rb_entry(n, struct vmap_area, rb_node);
352 		if (addr < va->va_start)
353 			n = n->rb_left;
354 		else if (addr >= va->va_end)
355 			n = n->rb_right;
356 		else
357 			return va;
358 	}
359 
360 	return NULL;
361 }
362 
363 static void __insert_vmap_area(struct vmap_area *va)
364 {
365 	struct rb_node **p = &vmap_area_root.rb_node;
366 	struct rb_node *parent = NULL;
367 	struct rb_node *tmp;
368 
369 	while (*p) {
370 		struct vmap_area *tmp_va;
371 
372 		parent = *p;
373 		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
374 		if (va->va_start < tmp_va->va_end)
375 			p = &(*p)->rb_left;
376 		else if (va->va_end > tmp_va->va_start)
377 			p = &(*p)->rb_right;
378 		else
379 			BUG();
380 	}
381 
382 	rb_link_node(&va->rb_node, parent, p);
383 	rb_insert_color(&va->rb_node, &vmap_area_root);
384 
385 	/* address-sort this list */
386 	tmp = rb_prev(&va->rb_node);
387 	if (tmp) {
388 		struct vmap_area *prev;
389 		prev = rb_entry(tmp, struct vmap_area, rb_node);
390 		list_add_rcu(&va->list, &prev->list);
391 	} else
392 		list_add_rcu(&va->list, &vmap_area_list);
393 }
394 
395 static void purge_vmap_area_lazy(void);
396 
397 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
398 
399 /*
400  * Allocate a region of KVA of the specified size and alignment, within the
401  * vstart and vend.
402  */
403 static struct vmap_area *alloc_vmap_area(unsigned long size,
404 				unsigned long align,
405 				unsigned long vstart, unsigned long vend,
406 				int node, gfp_t gfp_mask)
407 {
408 	struct vmap_area *va;
409 	struct rb_node *n;
410 	unsigned long addr;
411 	int purged = 0;
412 	struct vmap_area *first;
413 
414 	BUG_ON(!size);
415 	BUG_ON(offset_in_page(size));
416 	BUG_ON(!is_power_of_2(align));
417 
418 	might_sleep();
419 
420 	va = kmalloc_node(sizeof(struct vmap_area),
421 			gfp_mask & GFP_RECLAIM_MASK, node);
422 	if (unlikely(!va))
423 		return ERR_PTR(-ENOMEM);
424 
425 	/*
426 	 * Only scan the relevant parts containing pointers to other objects
427 	 * to avoid false negatives.
428 	 */
429 	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
430 
431 retry:
432 	spin_lock(&vmap_area_lock);
433 	/*
434 	 * Invalidate cache if we have more permissive parameters.
435 	 * cached_hole_size notes the largest hole noticed _below_
436 	 * the vmap_area cached in free_vmap_cache: if size fits
437 	 * into that hole, we want to scan from vstart to reuse
438 	 * the hole instead of allocating above free_vmap_cache.
439 	 * Note that __free_vmap_area may update free_vmap_cache
440 	 * without updating cached_hole_size or cached_align.
441 	 */
442 	if (!free_vmap_cache ||
443 			size < cached_hole_size ||
444 			vstart < cached_vstart ||
445 			align < cached_align) {
446 nocache:
447 		cached_hole_size = 0;
448 		free_vmap_cache = NULL;
449 	}
450 	/* record if we encounter less permissive parameters */
451 	cached_vstart = vstart;
452 	cached_align = align;
453 
454 	/* find starting point for our search */
455 	if (free_vmap_cache) {
456 		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
457 		addr = ALIGN(first->va_end, align);
458 		if (addr < vstart)
459 			goto nocache;
460 		if (addr + size < addr)
461 			goto overflow;
462 
463 	} else {
464 		addr = ALIGN(vstart, align);
465 		if (addr + size < addr)
466 			goto overflow;
467 
468 		n = vmap_area_root.rb_node;
469 		first = NULL;
470 
471 		while (n) {
472 			struct vmap_area *tmp;
473 			tmp = rb_entry(n, struct vmap_area, rb_node);
474 			if (tmp->va_end >= addr) {
475 				first = tmp;
476 				if (tmp->va_start <= addr)
477 					break;
478 				n = n->rb_left;
479 			} else
480 				n = n->rb_right;
481 		}
482 
483 		if (!first)
484 			goto found;
485 	}
486 
487 	/* from the starting point, walk areas until a suitable hole is found */
488 	while (addr + size > first->va_start && addr + size <= vend) {
489 		if (addr + cached_hole_size < first->va_start)
490 			cached_hole_size = first->va_start - addr;
491 		addr = ALIGN(first->va_end, align);
492 		if (addr + size < addr)
493 			goto overflow;
494 
495 		if (list_is_last(&first->list, &vmap_area_list))
496 			goto found;
497 
498 		first = list_next_entry(first, list);
499 	}
500 
501 found:
502 	/*
503 	 * Also check the calculated address against vstart, because
504 	 * it can wrap around to 0 due to a big align request.
505 	 */
506 	if (addr + size > vend || addr < vstart)
507 		goto overflow;
508 
509 	va->va_start = addr;
510 	va->va_end = addr + size;
511 	va->flags = 0;
512 	__insert_vmap_area(va);
513 	free_vmap_cache = &va->rb_node;
514 	spin_unlock(&vmap_area_lock);
515 
516 	BUG_ON(!IS_ALIGNED(va->va_start, align));
517 	BUG_ON(va->va_start < vstart);
518 	BUG_ON(va->va_end > vend);
519 
520 	return va;
521 
522 overflow:
523 	spin_unlock(&vmap_area_lock);
524 	if (!purged) {
525 		purge_vmap_area_lazy();
526 		purged = 1;
527 		goto retry;
528 	}
529 
530 	if (gfpflags_allow_blocking(gfp_mask)) {
531 		unsigned long freed = 0;
532 		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
533 		if (freed > 0) {
534 			purged = 0;
535 			goto retry;
536 		}
537 	}
538 
539 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
540 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
541 			size);
542 	kfree(va);
543 	return ERR_PTR(-EBUSY);
544 }
545 
546 int register_vmap_purge_notifier(struct notifier_block *nb)
547 {
548 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
549 }
550 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
551 
552 int unregister_vmap_purge_notifier(struct notifier_block *nb)
553 {
554 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
555 }
556 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
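
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a subsystem that caches vmap()ed buffers can register a purge notifier so
 * that a failing alloc_vmap_area() gets a chance to reclaim address space
 * and retry. The names below are hypothetical; the contract (add the amount
 * freed to the unsigned long behind @ptr, any nonzero value triggers a
 * retry) follows the call site in alloc_vmap_area() above.
 */
static unsigned long example_cached_pages;	/* pages the cache could drop */

static int example_vmap_purge(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	unsigned long *freed = ptr;

	/* Drop the hypothetical cache and report how much was released. */
	*freed += xchg(&example_cached_pages, 0);
	return NOTIFY_DONE;
}

static struct notifier_block example_vmap_purge_nb = {
	.notifier_call = example_vmap_purge,
};

static int __maybe_unused example_vmap_purge_init(void)
{
	return register_vmap_purge_notifier(&example_vmap_purge_nb);
}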
557 
558 static void __free_vmap_area(struct vmap_area *va)
559 {
560 	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
561 
562 	if (free_vmap_cache) {
563 		if (va->va_end < cached_vstart) {
564 			free_vmap_cache = NULL;
565 		} else {
566 			struct vmap_area *cache;
567 			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
568 			if (va->va_start <= cache->va_start) {
569 				free_vmap_cache = rb_prev(&va->rb_node);
570 				/*
571 				 * We don't try to update cached_hole_size or
572 				 * cached_align, but it won't go very wrong.
573 				 */
574 			}
575 		}
576 	}
577 	rb_erase(&va->rb_node, &vmap_area_root);
578 	RB_CLEAR_NODE(&va->rb_node);
579 	list_del_rcu(&va->list);
580 
581 	/*
582 	 * Track the highest possible candidate for pcpu area
583 	 * allocation.  Areas outside of the vmalloc area can be returned
584 	 * here too, so consider only end addresses which fall inside the
585 	 * vmalloc area proper.
586 	 */
587 	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
588 		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
589 
590 	kfree_rcu(va, rcu_head);
591 }
592 
593 /*
594  * Free a region of KVA allocated by alloc_vmap_area
595  */
596 static void free_vmap_area(struct vmap_area *va)
597 {
598 	spin_lock(&vmap_area_lock);
599 	__free_vmap_area(va);
600 	spin_unlock(&vmap_area_lock);
601 }
602 
603 /*
604  * Clear the pagetable entries of a given vmap_area
605  */
606 static void unmap_vmap_area(struct vmap_area *va)
607 {
608 	vunmap_page_range(va->va_start, va->va_end);
609 }
610 
611 /*
612  * lazy_max_pages is the maximum amount of virtual address space we gather up
613  * before attempting to purge with a TLB flush.
614  *
615  * There is a tradeoff here: a larger number will cover more kernel page tables
616  * and take slightly longer to purge, but it will linearly reduce the number of
617  * global TLB flushes that must be performed. It would seem natural to scale
618  * this number up linearly with the number of CPUs (because vmapping activity
619  * could also scale linearly with the number of CPUs), however it is likely
620  * that in practice, workloads might be constrained in other ways that mean
621  * vmap activity will not scale linearly with CPUs. Also, I want to be
622  * conservative and not introduce a big latency on huge systems, so go with
623  * a less aggressive log scale. It will still be an improvement over the old
624  * code, and it will be simple to change the scale factor if we find that it
625  * becomes a problem on bigger systems.
626  */
627 static unsigned long lazy_max_pages(void)
628 {
629 	unsigned int log;
630 
631 	log = fls(num_online_cpus());
632 
633 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
634 }
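
/*
 * Worked example (editor's note, not part of the original file): with 4 KiB
 * pages the 32 MB term above is 8192 pages. On an 8-CPU machine
 * fls(num_online_cpus()) == 4, so up to 4 * 8192 = 32768 pages (128 MB) of
 * lazily freed address space may accumulate before a purge, and its global
 * TLB flush, is triggered.
 */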
635 
636 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
637 
638 /*
639  * Serialize vmap purging.  There is no actual critical section protected
640  * by this lock, but we want to avoid concurrent calls for performance
641  * reasons and to make pcpu_get_vm_areas() more deterministic.
642  */
643 static DEFINE_MUTEX(vmap_purge_lock);
644 
645 /* for per-CPU blocks */
646 static void purge_fragmented_blocks_allcpus(void);
647 
648 /*
649  * called before a call to iounmap() if the caller wants vm_area_struct's
650  * immediately freed.
651  */
652 void set_iounmap_nonlazy(void)
653 {
654 	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
655 }
656 
657 /*
658  * Purges all lazily-freed vmap areas.
659  */
660 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
661 {
662 	unsigned long resched_threshold;
663 	struct llist_node *valist;
664 	struct vmap_area *va;
665 	struct vmap_area *n_va;
666 
667 	lockdep_assert_held(&vmap_purge_lock);
668 
669 	valist = llist_del_all(&vmap_purge_list);
670 	if (unlikely(valist == NULL))
671 		return false;
672 
673 	/*
674 	 * TODO: calculate the flush range without looping.
675 	 * The list can be up to lazy_max_pages() elements.
676 	 */
677 	llist_for_each_entry(va, valist, purge_list) {
678 		if (va->va_start < start)
679 			start = va->va_start;
680 		if (va->va_end > end)
681 			end = va->va_end;
682 	}
683 
684 	flush_tlb_kernel_range(start, end);
685 	resched_threshold = lazy_max_pages() << 1;
686 
687 	spin_lock(&vmap_area_lock);
688 	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
689 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
690 
691 		__free_vmap_area(va);
692 		atomic_long_sub(nr, &vmap_lazy_nr);
693 
694 		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
695 			cond_resched_lock(&vmap_area_lock);
696 	}
697 	spin_unlock(&vmap_area_lock);
698 	return true;
699 }
700 
701 /*
702  * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
703  * is already purging.
704  */
705 static void try_purge_vmap_area_lazy(void)
706 {
707 	if (mutex_trylock(&vmap_purge_lock)) {
708 		__purge_vmap_area_lazy(ULONG_MAX, 0);
709 		mutex_unlock(&vmap_purge_lock);
710 	}
711 }
712 
713 /*
714  * Kick off a purge of the outstanding lazy areas.
715  */
716 static void purge_vmap_area_lazy(void)
717 {
718 	mutex_lock(&vmap_purge_lock);
719 	purge_fragmented_blocks_allcpus();
720 	__purge_vmap_area_lazy(ULONG_MAX, 0);
721 	mutex_unlock(&vmap_purge_lock);
722 }
723 
724 /*
725  * Free a vmap area; the caller must ensure that the area has been unmapped
726  * and that flush_cache_vunmap() has been called for the correct range
727  * beforehand.
728  */
729 static void free_vmap_area_noflush(struct vmap_area *va)
730 {
731 	unsigned long nr_lazy;
732 
733 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
734 				PAGE_SHIFT, &vmap_lazy_nr);
735 
736 	/* After this point, we may free va at any time */
737 	llist_add(&va->purge_list, &vmap_purge_list);
738 
739 	if (unlikely(nr_lazy > lazy_max_pages()))
740 		try_purge_vmap_area_lazy();
741 }
742 
743 /*
744  * Free and unmap a vmap area
745  */
746 static void free_unmap_vmap_area(struct vmap_area *va)
747 {
748 	flush_cache_vunmap(va->va_start, va->va_end);
749 	unmap_vmap_area(va);
750 	if (debug_pagealloc_enabled())
751 		flush_tlb_kernel_range(va->va_start, va->va_end);
752 
753 	free_vmap_area_noflush(va);
754 }
755 
756 static struct vmap_area *find_vmap_area(unsigned long addr)
757 {
758 	struct vmap_area *va;
759 
760 	spin_lock(&vmap_area_lock);
761 	va = __find_vmap_area(addr);
762 	spin_unlock(&vmap_area_lock);
763 
764 	return va;
765 }
766 
767 /*** Per cpu kva allocator ***/
768 
769 /*
770  * vmap space is limited especially on 32 bit architectures. Ensure there is
771  * room for at least 16 percpu vmap blocks per CPU.
772  */
773 /*
774  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
775  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
776  * instead (we just need a rough idea)
777  */
778 #if BITS_PER_LONG == 32
779 #define VMALLOC_SPACE		(128UL*1024*1024)
780 #else
781 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
782 #endif
783 
784 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
785 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
786 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
787 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
788 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
789 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
790 #define VMAP_BBMAP_BITS		\
791 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
792 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
793 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
794 
795 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
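
/*
 * Worked example (editor's note, not part of the original file): on a 64-bit
 * kernel with 4 KiB pages and NR_CPUS = 64, VMALLOC_PAGES is
 * 128 GB / 4 KiB = 32M pages, so the computed term is 32M / 64 / 16 = 32768.
 * That is clamped to VMAP_BBMAP_BITS_MAX = 1024, giving
 * VMAP_BLOCK_SIZE = 1024 * 4 KiB = 4 MB per vmap block.
 */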
796 
797 static bool vmap_initialized __read_mostly = false;
798 
799 struct vmap_block_queue {
800 	spinlock_t lock;
801 	struct list_head free;
802 };
803 
804 struct vmap_block {
805 	spinlock_t lock;
806 	struct vmap_area *va;
807 	unsigned long free, dirty;
808 	unsigned long dirty_min, dirty_max; /*< dirty range */
809 	struct list_head free_list;
810 	struct rcu_head rcu_head;
811 	struct list_head purge;
812 };
813 
814 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
815 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
816 
817 /*
818  * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
819  * in the free path. Could get rid of this if we change the API to return a
820  * "cookie" from alloc, to be passed to free. But no big deal yet.
821  */
822 static DEFINE_SPINLOCK(vmap_block_tree_lock);
823 static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
824 
825 /*
826  * We should probably have a fallback mechanism to allocate virtual memory
827  * out of partially filled vmap blocks. However vmap block sizing should be
828  * fairly reasonable according to the vmalloc size, so it shouldn't be a
829  * big problem.
830  */
831 
832 static unsigned long addr_to_vb_idx(unsigned long addr)
833 {
834 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
835 	addr /= VMAP_BLOCK_SIZE;
836 	return addr;
837 }
838 
839 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
840 {
841 	unsigned long addr;
842 
843 	addr = va_start + (pages_off << PAGE_SHIFT);
844 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
845 	return (void *)addr;
846 }
847 
848 /**
849  * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
850  *                  block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
851  * @order:    how many 2^order pages should be occupied in newly allocated block
852  * @gfp_mask: flags for the page level allocator
853  *
854  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
855  */
856 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
857 {
858 	struct vmap_block_queue *vbq;
859 	struct vmap_block *vb;
860 	struct vmap_area *va;
861 	unsigned long vb_idx;
862 	int node, err;
863 	void *vaddr;
864 
865 	node = numa_node_id();
866 
867 	vb = kmalloc_node(sizeof(struct vmap_block),
868 			gfp_mask & GFP_RECLAIM_MASK, node);
869 	if (unlikely(!vb))
870 		return ERR_PTR(-ENOMEM);
871 
872 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
873 					VMALLOC_START, VMALLOC_END,
874 					node, gfp_mask);
875 	if (IS_ERR(va)) {
876 		kfree(vb);
877 		return ERR_CAST(va);
878 	}
879 
880 	err = radix_tree_preload(gfp_mask);
881 	if (unlikely(err)) {
882 		kfree(vb);
883 		free_vmap_area(va);
884 		return ERR_PTR(err);
885 	}
886 
887 	vaddr = vmap_block_vaddr(va->va_start, 0);
888 	spin_lock_init(&vb->lock);
889 	vb->va = va;
890 	/* At least something should be left free */
891 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
892 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
893 	vb->dirty = 0;
894 	vb->dirty_min = VMAP_BBMAP_BITS;
895 	vb->dirty_max = 0;
896 	INIT_LIST_HEAD(&vb->free_list);
897 
898 	vb_idx = addr_to_vb_idx(va->va_start);
899 	spin_lock(&vmap_block_tree_lock);
900 	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
901 	spin_unlock(&vmap_block_tree_lock);
902 	BUG_ON(err);
903 	radix_tree_preload_end();
904 
905 	vbq = &get_cpu_var(vmap_block_queue);
906 	spin_lock(&vbq->lock);
907 	list_add_tail_rcu(&vb->free_list, &vbq->free);
908 	spin_unlock(&vbq->lock);
909 	put_cpu_var(vmap_block_queue);
910 
911 	return vaddr;
912 }
913 
914 static void free_vmap_block(struct vmap_block *vb)
915 {
916 	struct vmap_block *tmp;
917 	unsigned long vb_idx;
918 
919 	vb_idx = addr_to_vb_idx(vb->va->va_start);
920 	spin_lock(&vmap_block_tree_lock);
921 	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
922 	spin_unlock(&vmap_block_tree_lock);
923 	BUG_ON(tmp != vb);
924 
925 	free_vmap_area_noflush(vb->va);
926 	kfree_rcu(vb, rcu_head);
927 }
928 
929 static void purge_fragmented_blocks(int cpu)
930 {
931 	LIST_HEAD(purge);
932 	struct vmap_block *vb;
933 	struct vmap_block *n_vb;
934 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
935 
936 	rcu_read_lock();
937 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
938 
939 		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
940 			continue;
941 
942 		spin_lock(&vb->lock);
943 		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
944 			vb->free = 0; /* prevent further allocs after releasing lock */
945 			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
946 			vb->dirty_min = 0;
947 			vb->dirty_max = VMAP_BBMAP_BITS;
948 			spin_lock(&vbq->lock);
949 			list_del_rcu(&vb->free_list);
950 			spin_unlock(&vbq->lock);
951 			spin_unlock(&vb->lock);
952 			list_add_tail(&vb->purge, &purge);
953 		} else
954 			spin_unlock(&vb->lock);
955 	}
956 	rcu_read_unlock();
957 
958 	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
959 		list_del(&vb->purge);
960 		free_vmap_block(vb);
961 	}
962 }
963 
964 static void purge_fragmented_blocks_allcpus(void)
965 {
966 	int cpu;
967 
968 	for_each_possible_cpu(cpu)
969 		purge_fragmented_blocks(cpu);
970 }
971 
972 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
973 {
974 	struct vmap_block_queue *vbq;
975 	struct vmap_block *vb;
976 	void *vaddr = NULL;
977 	unsigned int order;
978 
979 	BUG_ON(offset_in_page(size));
980 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
981 	if (WARN_ON(size == 0)) {
982 		/*
983 		 * Allocating 0 bytes isn't what the caller wants, since
984 		 * get_order(0) returns a funny result. Just warn and terminate
985 		 * early.
986 		 */
987 		return NULL;
988 	}
989 	order = get_order(size);
990 
991 	rcu_read_lock();
992 	vbq = &get_cpu_var(vmap_block_queue);
993 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
994 		unsigned long pages_off;
995 
996 		spin_lock(&vb->lock);
997 		if (vb->free < (1UL << order)) {
998 			spin_unlock(&vb->lock);
999 			continue;
1000 		}
1001 
1002 		pages_off = VMAP_BBMAP_BITS - vb->free;
1003 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1004 		vb->free -= 1UL << order;
1005 		if (vb->free == 0) {
1006 			spin_lock(&vbq->lock);
1007 			list_del_rcu(&vb->free_list);
1008 			spin_unlock(&vbq->lock);
1009 		}
1010 
1011 		spin_unlock(&vb->lock);
1012 		break;
1013 	}
1014 
1015 	put_cpu_var(vmap_block_queue);
1016 	rcu_read_unlock();
1017 
1018 	/* Allocate new block if nothing was found */
1019 	if (!vaddr)
1020 		vaddr = new_vmap_block(order, gfp_mask);
1021 
1022 	return vaddr;
1023 }
1024 
1025 static void vb_free(const void *addr, unsigned long size)
1026 {
1027 	unsigned long offset;
1028 	unsigned long vb_idx;
1029 	unsigned int order;
1030 	struct vmap_block *vb;
1031 
1032 	BUG_ON(offset_in_page(size));
1033 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1034 
1035 	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
1036 
1037 	order = get_order(size);
1038 
1039 	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
1040 	offset >>= PAGE_SHIFT;
1041 
1042 	vb_idx = addr_to_vb_idx((unsigned long)addr);
1043 	rcu_read_lock();
1044 	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
1045 	rcu_read_unlock();
1046 	BUG_ON(!vb);
1047 
1048 	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
1049 
1050 	if (debug_pagealloc_enabled())
1051 		flush_tlb_kernel_range((unsigned long)addr,
1052 					(unsigned long)addr + size);
1053 
1054 	spin_lock(&vb->lock);
1055 
1056 	/* Expand dirty range */
1057 	vb->dirty_min = min(vb->dirty_min, offset);
1058 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1059 
1060 	vb->dirty += 1UL << order;
1061 	if (vb->dirty == VMAP_BBMAP_BITS) {
1062 		BUG_ON(vb->free);
1063 		spin_unlock(&vb->lock);
1064 		free_vmap_block(vb);
1065 	} else
1066 		spin_unlock(&vb->lock);
1067 }
1068 
1069 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
1070 {
1071 	int cpu;
1072 
1073 	if (unlikely(!vmap_initialized))
1074 		return;
1075 
1076 	might_sleep();
1077 
1078 	for_each_possible_cpu(cpu) {
1079 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1080 		struct vmap_block *vb;
1081 
1082 		rcu_read_lock();
1083 		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1084 			spin_lock(&vb->lock);
1085 			if (vb->dirty) {
1086 				unsigned long va_start = vb->va->va_start;
1087 				unsigned long s, e;
1088 
1089 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
1090 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
1091 
1092 				start = min(s, start);
1093 				end   = max(e, end);
1094 
1095 				flush = 1;
1096 			}
1097 			spin_unlock(&vb->lock);
1098 		}
1099 		rcu_read_unlock();
1100 	}
1101 
1102 	mutex_lock(&vmap_purge_lock);
1103 	purge_fragmented_blocks_allcpus();
1104 	if (!__purge_vmap_area_lazy(start, end) && flush)
1105 		flush_tlb_kernel_range(start, end);
1106 	mutex_unlock(&vmap_purge_lock);
1107 }
1108 
1109 /**
1110  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1111  *
1112  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1113  * to amortize TLB flushing overheads. What this means is that any page you
1114  * have now, may, in a former life, have been mapped into kernel virtual
1115  * address by the vmap layer and so there might be some CPUs with TLB entries
1116  * still referencing that page (additional to the regular 1:1 kernel mapping).
1117  *
1118  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1119  * be sure that none of the pages we have control over will have any aliases
1120  * from the vmap layer.
1121  */
1122 void vm_unmap_aliases(void)
1123 {
1124 	unsigned long start = ULONG_MAX, end = 0;
1125 	int flush = 0;
1126 
1127 	_vm_unmap_aliases(start, end, flush);
1128 }
1129 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1130 
1131 /**
1132  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1133  * @mem: the pointer returned by vm_map_ram
1134  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1135  */
1136 void vm_unmap_ram(const void *mem, unsigned int count)
1137 {
1138 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1139 	unsigned long addr = (unsigned long)mem;
1140 	struct vmap_area *va;
1141 
1142 	might_sleep();
1143 	BUG_ON(!addr);
1144 	BUG_ON(addr < VMALLOC_START);
1145 	BUG_ON(addr > VMALLOC_END);
1146 	BUG_ON(!PAGE_ALIGNED(addr));
1147 
1148 	if (likely(count <= VMAP_MAX_ALLOC)) {
1149 		debug_check_no_locks_freed(mem, size);
1150 		vb_free(mem, size);
1151 		return;
1152 	}
1153 
1154 	va = find_vmap_area(addr);
1155 	BUG_ON(!va);
1156 	debug_check_no_locks_freed((void *)va->va_start,
1157 				    (va->va_end - va->va_start));
1158 	free_unmap_vmap_area(va);
1159 }
1160 EXPORT_SYMBOL(vm_unmap_ram);
1161 
1162 /**
1163  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1164  * @pages: an array of pointers to the pages to be mapped
1165  * @count: number of pages
1166  * @node: prefer to allocate data structures on this node
1167  * @prot: memory protection to use. PAGE_KERNEL for regular RAM
1168  *
1169  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
1170  * faster than vmap so it's good.  But if you mix long-life and short-life
1171  * objects with vm_map_ram(), it could consume lots of address space through
1172  * fragmentation (especially on a 32bit machine).  You could see failures in
1173  * the end.  Please use this function for short-lived objects.
1174  *
1175  * Returns: a pointer to the address that has been mapped, or %NULL on failure
1176  */
1177 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1178 {
1179 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1180 	unsigned long addr;
1181 	void *mem;
1182 
1183 	if (likely(count <= VMAP_MAX_ALLOC)) {
1184 		mem = vb_alloc(size, GFP_KERNEL);
1185 		if (IS_ERR(mem))
1186 			return NULL;
1187 		addr = (unsigned long)mem;
1188 	} else {
1189 		struct vmap_area *va;
1190 		va = alloc_vmap_area(size, PAGE_SIZE,
1191 				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1192 		if (IS_ERR(va))
1193 			return NULL;
1194 
1195 		addr = va->va_start;
1196 		mem = (void *)addr;
1197 	}
1198 	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1199 		vm_unmap_ram(mem, count);
1200 		return NULL;
1201 	}
1202 	return mem;
1203 }
1204 EXPORT_SYMBOL(vm_map_ram);
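
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical short-lived vm_map_ram() user maps a handful of pages, touches
 * them through the linear mapping, and tears the mapping down with the same
 * page count. The function name and its arguments are hypothetical.
 */
static __maybe_unused void example_vm_map_ram_usage(struct page **pages,
						    unsigned int nr_pages)
{
	void *buf;

	buf = vm_map_ram(pages, nr_pages, NUMA_NO_NODE, PAGE_KERNEL);
	if (!buf)
		return;

	memset(buf, 0, nr_pages * PAGE_SIZE);	/* use the contiguous view */

	/* @count must match what was passed to vm_map_ram() */
	vm_unmap_ram(buf, nr_pages);
}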
1205 
1206 static struct vm_struct *vmlist __initdata;
1207 
1208 /**
1209  * vm_area_add_early - add vmap area early during boot
1210  * @vm: vm_struct to add
1211  *
1212  * This function is used to add fixed kernel vm area to vmlist before
1213  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
1214  * should contain proper values and the other fields should be zero.
1215  *
1216  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1217  */
1218 void __init vm_area_add_early(struct vm_struct *vm)
1219 {
1220 	struct vm_struct *tmp, **p;
1221 
1222 	BUG_ON(vmap_initialized);
1223 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1224 		if (tmp->addr >= vm->addr) {
1225 			BUG_ON(tmp->addr < vm->addr + vm->size);
1226 			break;
1227 		} else
1228 			BUG_ON(tmp->addr + tmp->size > vm->addr);
1229 	}
1230 	vm->next = *p;
1231 	*p = vm;
1232 }
1233 
1234 /**
1235  * vm_area_register_early - register vmap area early during boot
1236  * @vm: vm_struct to register
1237  * @align: requested alignment
1238  *
1239  * This function is used to register kernel vm area before
1240  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
1241  * proper values on entry and other fields should be zero.  On return,
1242  * vm->addr contains the allocated address.
1243  *
1244  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1245  */
1246 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1247 {
1248 	static size_t vm_init_off __initdata;
1249 	unsigned long addr;
1250 
1251 	addr = ALIGN(VMALLOC_START + vm_init_off, align);
1252 	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1253 
1254 	vm->addr = (void *)addr;
1255 
1256 	vm_area_add_early(vm);
1257 }
1258 
1259 void __init vmalloc_init(void)
1260 {
1261 	struct vmap_area *va;
1262 	struct vm_struct *tmp;
1263 	int i;
1264 
1265 	for_each_possible_cpu(i) {
1266 		struct vmap_block_queue *vbq;
1267 		struct vfree_deferred *p;
1268 
1269 		vbq = &per_cpu(vmap_block_queue, i);
1270 		spin_lock_init(&vbq->lock);
1271 		INIT_LIST_HEAD(&vbq->free);
1272 		p = &per_cpu(vfree_deferred, i);
1273 		init_llist_head(&p->list);
1274 		INIT_WORK(&p->wq, free_work);
1275 	}
1276 
1277 	/* Import existing vmlist entries. */
1278 	for (tmp = vmlist; tmp; tmp = tmp->next) {
1279 		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
1280 		va->flags = VM_VM_AREA;
1281 		va->va_start = (unsigned long)tmp->addr;
1282 		va->va_end = va->va_start + tmp->size;
1283 		va->vm = tmp;
1284 		__insert_vmap_area(va);
1285 	}
1286 
1287 	vmap_area_pcpu_hole = VMALLOC_END;
1288 
1289 	vmap_initialized = true;
1290 }
1291 
1292 /**
1293  * map_kernel_range_noflush - map kernel VM area with the specified pages
1294  * @addr: start of the VM area to map
1295  * @size: size of the VM area to map
1296  * @prot: page protection flags to use
1297  * @pages: pages to map
1298  *
1299  * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
1300  * specify should have been allocated using get_vm_area() and its
1301  * friends.
1302  *
1303  * NOTE:
1304  * This function does NOT do any cache flushing.  The caller is
1305  * responsible for calling flush_cache_vmap() on to-be-mapped areas
1306  * before calling this function.
1307  *
1308  * RETURNS:
1309  * The number of pages mapped on success, -errno on failure.
1310  */
1311 int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1312 			     pgprot_t prot, struct page **pages)
1313 {
1314 	return vmap_page_range_noflush(addr, addr + size, prot, pages);
1315 }
1316 
1317 /**
1318  * unmap_kernel_range_noflush - unmap kernel VM area
1319  * @addr: start of the VM area to unmap
1320  * @size: size of the VM area to unmap
1321  *
1322  * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
1323  * specify should have been allocated using get_vm_area() and its
1324  * friends.
1325  *
1326  * NOTE:
1327  * This function does NOT do any cache flushing.  The caller is
1328  * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
1329  * before calling this function and flush_tlb_kernel_range() after.
1330  */
1331 void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1332 {
1333 	vunmap_page_range(addr, addr + size);
1334 }
1335 EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
1336 
1337 /**
1338  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1339  * @addr: start of the VM area to unmap
1340  * @size: size of the VM area to unmap
1341  *
1342  * Similar to unmap_kernel_range_noflush() but flushes vcache before
1343  * the unmapping and tlb after.
1344  */
1345 void unmap_kernel_range(unsigned long addr, unsigned long size)
1346 {
1347 	unsigned long end = addr + size;
1348 
1349 	flush_cache_vunmap(addr, end);
1350 	vunmap_page_range(addr, end);
1351 	flush_tlb_kernel_range(addr, end);
1352 }
1353 EXPORT_SYMBOL_GPL(unmap_kernel_range);
1354 
1355 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
1356 {
1357 	unsigned long addr = (unsigned long)area->addr;
1358 	unsigned long end = addr + get_vm_area_size(area);
1359 	int err;
1360 
1361 	err = vmap_page_range(addr, end, prot, pages);
1362 
1363 	return err > 0 ? 0 : err;
1364 }
1365 EXPORT_SYMBOL_GPL(map_vm_area);
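
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * reserve a chunk of vmalloc address space with get_vm_area() and populate
 * it with map_vm_area(); this mirrors what vmap() below does internally.
 * get_vm_area() and vunmap() are declared in <linux/vmalloc.h> and defined
 * later in this file; the function name here is hypothetical.
 */
static __maybe_unused void *example_map_pages(struct page **pages,
					      unsigned long nr_pages)
{
	struct vm_struct *area;

	area = get_vm_area(nr_pages << PAGE_SHIFT, VM_MAP);
	if (!area)
		return NULL;

	if (map_vm_area(area, PAGE_KERNEL, pages)) {
		vunmap(area->addr);	/* also frees the vm_struct */
		return NULL;
	}

	return area->addr;
}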
1366 
1367 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1368 			      unsigned long flags, const void *caller)
1369 {
1370 	spin_lock(&vmap_area_lock);
1371 	vm->flags = flags;
1372 	vm->addr = (void *)va->va_start;
1373 	vm->size = va->va_end - va->va_start;
1374 	vm->caller = caller;
1375 	va->vm = vm;
1376 	va->flags |= VM_VM_AREA;
1377 	spin_unlock(&vmap_area_lock);
1378 }
1379 
1380 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
1381 {
1382 	/*
1383 	 * Before removing VM_UNINITIALIZED,
1384 	 * we should make sure that vm has proper values.
1385 	 * Pair with smp_rmb() in show_numa_info().
1386 	 */
1387 	smp_wmb();
1388 	vm->flags &= ~VM_UNINITIALIZED;
1389 }
1390 
1391 static struct vm_struct *__get_vm_area_node(unsigned long size,
1392 		unsigned long align, unsigned long flags, unsigned long start,
1393 		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
1394 {
1395 	struct vmap_area *va;
1396 	struct vm_struct *area;
1397 
1398 	BUG_ON(in_interrupt());
1399 	size = PAGE_ALIGN(size);
1400 	if (unlikely(!size))
1401 		return NULL;
1402 
1403 	if (flags & VM_IOREMAP)
1404 		align = 1ul << clamp_t(int, get_count_order_long(size),
1405 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
1406 
1407 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1408 	if (unlikely(!area))
1409 		return NULL;
1410 
1411 	if (!(flags & VM_NO_GUARD))
1412 		size += PAGE_SIZE;
1413 
1414 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1415 	if (IS_ERR(va)) {
1416 		kfree(area);
1417 		return NULL;
1418 	}
1419 
1420 	setup_vmalloc_vm(area, va, flags, caller);
1421 
1422 	return area;
1423 }
1424 
1425 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1426 				unsigned long start, unsigned long end)
1427 {
1428 	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1429 				  GFP_KERNEL, __builtin_return_address(0));
1430 }
1431 EXPORT_SYMBOL_GPL(__get_vm_area);
1432 
1433 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1434 				       unsigned long start, unsigned long end,
1435 				       const void *caller)
1436 {
1437 	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1438 				  GFP_KERNEL, caller);
1439 }
1440 
1441 /**
1442  * get_vm_area - reserve a contiguous kernel virtual area
1443  * @size:	 size of the area
1444  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
1445  *
1446  * Search an area of @size in the kernel virtual mapping area,
1447  * and reserve it for our purposes.  Returns the area descriptor
1448  * on success or %NULL on failure.
1449  *
1450  * Return: the area descriptor on success or %NULL on failure.
1451  */
1452 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1453 {
1454 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1455 				  NUMA_NO_NODE, GFP_KERNEL,
1456 				  __builtin_return_address(0));
1457 }
1458 
1459 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1460 				const void *caller)
1461 {
1462 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1463 				  NUMA_NO_NODE, GFP_KERNEL, caller);
1464 }
1465 
1466 /**
1467  * find_vm_area - find a continuous kernel virtual area
1468  * @addr:	  base address
1469  *
1470  * Search for the kernel VM area starting at @addr, and return it.
1471  * It is up to the caller to do all required locking to keep the returned
1472  * pointer valid.
1473  *
1474  * Return: pointer to the found area or %NULL on failure
1475  */
1476 struct vm_struct *find_vm_area(const void *addr)
1477 {
1478 	struct vmap_area *va;
1479 
1480 	va = find_vmap_area((unsigned long)addr);
1481 	if (va && va->flags & VM_VM_AREA)
1482 		return va->vm;
1483 
1484 	return NULL;
1485 }
1486 
1487 /**
1488  * remove_vm_area - find and remove a continuous kernel virtual area
1489  * @addr:	    base address
1490  *
1491  * Search for the kernel VM area starting at @addr, and remove it.
1492  * This function returns the found VM area, but using it is NOT safe
1493  * on SMP machines, except for its size or flags.
1494  *
1495  * Return: pointer to the found area or %NULL on failure
1496  */
1497 struct vm_struct *remove_vm_area(const void *addr)
1498 {
1499 	struct vmap_area *va;
1500 
1501 	might_sleep();
1502 
1503 	va = find_vmap_area((unsigned long)addr);
1504 	if (va && va->flags & VM_VM_AREA) {
1505 		struct vm_struct *vm = va->vm;
1506 
1507 		spin_lock(&vmap_area_lock);
1508 		va->vm = NULL;
1509 		va->flags &= ~VM_VM_AREA;
1510 		va->flags |= VM_LAZY_FREE;
1511 		spin_unlock(&vmap_area_lock);
1512 
1513 		kasan_free_shadow(vm);
1514 		free_unmap_vmap_area(va);
1515 
1516 		return vm;
1517 	}
1518 	return NULL;
1519 }
1520 
1521 static inline void set_area_direct_map(const struct vm_struct *area,
1522 				       int (*set_direct_map)(struct page *page))
1523 {
1524 	int i;
1525 
1526 	for (i = 0; i < area->nr_pages; i++)
1527 		if (page_address(area->pages[i]))
1528 			set_direct_map(area->pages[i]);
1529 }
1530 
1531 /* Handle removing and resetting vm mappings related to the vm_struct. */
1532 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
1533 {
1534 	unsigned long addr = (unsigned long)area->addr;
1535 	unsigned long start = ULONG_MAX, end = 0;
1536 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
1537 	int i;
1538 
1539 	/*
1540 	 * The below block can be removed when all architectures that have
1541 	 * direct map permissions also have set_direct_map_() implementations.
1542 	 * This is concerned with resetting the direct map of any vm alias with
1543 	 * execute permissions, without leaving a RW+X window.
1544 	 */
1545 	if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
1546 		set_memory_nx(addr, area->nr_pages);
1547 		set_memory_rw(addr, area->nr_pages);
1548 	}
1549 
1550 	remove_vm_area(area->addr);
1551 
1552 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
1553 	if (!flush_reset)
1554 		return;
1555 
1556 	/*
1557 	 * If not deallocating pages, just do the flush of the VM area and
1558 	 * return.
1559 	 */
1560 	if (!deallocate_pages) {
1561 		vm_unmap_aliases();
1562 		return;
1563 	}
1564 
1565 	/*
1566 	 * If execution gets here, flush the vm mapping and reset the direct
1567 	 * map. Find the start and end range of the direct mappings to make sure
1568 	 * the vm_unmap_aliases() flush includes the direct map.
1569 	 */
1570 	for (i = 0; i < area->nr_pages; i++) {
1571 		if (page_address(area->pages[i])) {
1572 			start = min((unsigned long)page_address(area->pages[i]), start);
1573 			end = max((unsigned long)page_address(area->pages[i]) + PAGE_SIZE, end);
1574 		}
1575 	}
1576 
1577 	/*
1578 	 * Set direct map to something invalid so that it won't be cached if
1579 	 * there are any accesses after the TLB flush, then flush the TLB and
1580 	 * reset the direct map permissions to the default.
1581 	 */
1582 	set_area_direct_map(area, set_direct_map_invalid_noflush);
1583 	_vm_unmap_aliases(start, end, 1);
1584 	set_area_direct_map(area, set_direct_map_default_noflush);
1585 }
1586 
1587 static void __vunmap(const void *addr, int deallocate_pages)
1588 {
1589 	struct vm_struct *area;
1590 
1591 	if (!addr)
1592 		return;
1593 
1594 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
1595 			addr))
1596 		return;
1597 
1598 	area = find_vm_area(addr);
1599 	if (unlikely(!area)) {
1600 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1601 				addr);
1602 		return;
1603 	}
1604 
1605 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
1606 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
1607 
1608 	vm_remove_mappings(area, deallocate_pages);
1609 
1610 	if (deallocate_pages) {
1611 		int i;
1612 
1613 		for (i = 0; i < area->nr_pages; i++) {
1614 			struct page *page = area->pages[i];
1615 
1616 			BUG_ON(!page);
1617 			__free_pages(page, 0);
1618 		}
1619 
1620 		kvfree(area->pages);
1621 	}
1622 
1623 	kfree(area);
1624 	return;
1625 }
1626 
1627 static inline void __vfree_deferred(const void *addr)
1628 {
1629 	/*
1630 	 * Use raw_cpu_ptr() because this can be called from preemptible
1631 	 * context. Preemption is absolutely fine here, because the llist_add()
1632 	 * implementation is lockless, so it works even if we are adding to
1633 	 * another cpu's list.  schedule_work() should be fine with this too.
1634 	 */
1635 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
1636 
1637 	if (llist_add((struct llist_node *)addr, &p->list))
1638 		schedule_work(&p->wq);
1639 }
1640 
1641 /**
1642  * vfree_atomic - release memory allocated by vmalloc()
1643  * @addr:	  memory base address
1644  *
1645  * This one is just like vfree() but can be called in any atomic context
1646  * except NMIs.
1647  */
1648 void vfree_atomic(const void *addr)
1649 {
1650 	BUG_ON(in_nmi());
1651 
1652 	kmemleak_free(addr);
1653 
1654 	if (!addr)
1655 		return;
1656 	__vfree_deferred(addr);
1657 }
1658 
1659 static void __vfree(const void *addr)
1660 {
1661 	if (unlikely(in_interrupt()))
1662 		__vfree_deferred(addr);
1663 	else
1664 		__vunmap(addr, 1);
1665 }
1666 
1667 /**
1668  * vfree - release memory allocated by vmalloc()
1669  * @addr:  memory base address
1670  *
1671  * Free the virtually contiguous memory area starting at @addr, as
1672  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
1673  * NULL, no operation is performed.
1674  *
1675  * Must not be called in NMI context (strictly speaking, only if we don't
1676  * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
1677  * conventions for vfree() arch-dependent would be a really bad idea)
1678  *
1679  * May sleep if called *not* from interrupt context.
1680  *
1681  * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
1682  */
1683 void vfree(const void *addr)
1684 {
1685 	BUG_ON(in_nmi());
1686 
1687 	kmemleak_free(addr);
1688 
1689 	might_sleep_if(!in_interrupt());
1690 
1691 	if (!addr)
1692 		return;
1693 
1694 	__vfree(addr);
1695 }
1696 EXPORT_SYMBOL(vfree);
1697 
1698 /**
1699  * vunmap - release virtual mapping obtained by vmap()
1700  * @addr:   memory base address
1701  *
1702  * Free the virtually contiguous memory area starting at @addr,
1703  * which was created from the page array passed to vmap().
1704  *
1705  * Must not be called in interrupt context.
1706  */
1707 void vunmap(const void *addr)
1708 {
1709 	BUG_ON(in_interrupt());
1710 	might_sleep();
1711 	if (addr)
1712 		__vunmap(addr, 0);
1713 }
1714 EXPORT_SYMBOL(vunmap);
1715 
1716 /**
1717  * vmap - map an array of pages into virtually contiguous space
1718  * @pages: array of page pointers
1719  * @count: number of pages to map
1720  * @flags: vm_area->flags
1721  * @prot: page protection for the mapping
1722  *
1723  * Maps @count pages from @pages into contiguous kernel virtual
1724  * space.
1725  *
1726  * Return: the address of the area or %NULL on failure
1727  */
1728 void *vmap(struct page **pages, unsigned int count,
1729 	   unsigned long flags, pgprot_t prot)
1730 {
1731 	struct vm_struct *area;
1732 	unsigned long size;		/* In bytes */
1733 
1734 	might_sleep();
1735 
1736 	if (count > totalram_pages())
1737 		return NULL;
1738 
1739 	size = (unsigned long)count << PAGE_SHIFT;
1740 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
1741 	if (!area)
1742 		return NULL;
1743 
1744 	if (map_vm_area(area, prot, pages)) {
1745 		vunmap(area->addr);
1746 		return NULL;
1747 	}
1748 
1749 	return area->addr;
1750 }
1751 EXPORT_SYMBOL(vmap);
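
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * allocate a few order-0 pages, map them into one contiguous kernel range
 * with vmap(), then tear everything down again. The function name and the
 * page count are hypothetical.
 */
static __maybe_unused void example_vmap_usage(void)
{
	struct page *pages[4];
	void *virt;
	int i;

	for (i = 0; i < ARRAY_SIZE(pages); i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}

	virt = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (virt) {
		memset(virt, 0, ARRAY_SIZE(pages) * PAGE_SIZE);
		vunmap(virt);		/* does not free the pages themselves */
	}

out_free:
	while (i--)
		__free_page(pages[i]);
}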
1752 
1753 static void *__vmalloc_node(unsigned long size, unsigned long align,
1754 			    gfp_t gfp_mask, pgprot_t prot,
1755 			    int node, const void *caller);
1756 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1757 				 pgprot_t prot, int node)
1758 {
1759 	struct page **pages;
1760 	unsigned int nr_pages, array_size, i;
1761 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1762 	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
1763 	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
1764 					0 :
1765 					__GFP_HIGHMEM;
1766 
1767 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
1768 	array_size = (nr_pages * sizeof(struct page *));
1769 
1770 	area->nr_pages = nr_pages;
1771 	/* Please note that the recursion is strictly bounded. */
1772 	if (array_size > PAGE_SIZE) {
1773 		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
1774 				PAGE_KERNEL, node, area->caller);
1775 	} else {
1776 		pages = kmalloc_node(array_size, nested_gfp, node);
1777 	}
1778 	area->pages = pages;
1779 	if (!area->pages) {
1780 		remove_vm_area(area->addr);
1781 		kfree(area);
1782 		return NULL;
1783 	}
1784 
1785 	for (i = 0; i < area->nr_pages; i++) {
1786 		struct page *page;
1787 
1788 		if (node == NUMA_NO_NODE)
1789 			page = alloc_page(alloc_mask|highmem_mask);
1790 		else
1791 			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
1792 
1793 		if (unlikely(!page)) {
1794 			/* Successfully allocated i pages, free them in __vunmap() */
1795 			area->nr_pages = i;
1796 			goto fail;
1797 		}
1798 		area->pages[i] = page;
1799 		if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
1800 			cond_resched();
1801 	}
1802 
1803 	if (map_vm_area(area, prot, pages))
1804 		goto fail;
1805 	return area->addr;
1806 
1807 fail:
1808 	warn_alloc(gfp_mask, NULL,
1809 			  "vmalloc: allocation failure, allocated %ld of %ld bytes",
1810 			  (area->nr_pages*PAGE_SIZE), area->size);
1811 	__vfree(area->addr);
1812 	return NULL;
1813 }
1814 
1815 /**
1816  * __vmalloc_node_range - allocate virtually contiguous memory
1817  * @size:		  allocation size
1818  * @align:		  desired alignment
1819  * @start:		  vm area range start
1820  * @end:		  vm area range end
1821  * @gfp_mask:		  flags for the page level allocator
1822  * @prot:		  protection mask for the allocated pages
1823  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
1824  * @node:		  node to use for allocation or NUMA_NO_NODE
1825  * @caller:		  caller's return address
1826  *
1827  * Allocate enough pages to cover @size from the page level
1828  * allocator with @gfp_mask flags.  Map them into contiguous
1829  * kernel virtual space, using a pagetable protection of @prot.
1830  *
1831  * Return: the address of the area or %NULL on failure
1832  */
1833 void *__vmalloc_node_range(unsigned long size, unsigned long align,
1834 			unsigned long start, unsigned long end, gfp_t gfp_mask,
1835 			pgprot_t prot, unsigned long vm_flags, int node,
1836 			const void *caller)
1837 {
1838 	struct vm_struct *area;
1839 	void *addr;
1840 	unsigned long real_size = size;
1841 
1842 	size = PAGE_ALIGN(size);
1843 	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
1844 		goto fail;
1845 
1846 	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
1847 				vm_flags, start, end, node, gfp_mask, caller);
1848 	if (!area)
1849 		goto fail;
1850 
1851 	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
1852 	if (!addr)
1853 		return NULL;
1854 
1855 	/*
1856 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
1857 	 * flag. It means that vm_struct is not fully initialized.
1858 	 * Now, it is fully initialized, so remove this flag here.
1859 	 */
1860 	clear_vm_uninitialized_flag(area);
1861 
1862 	kmemleak_vmalloc(area, size, gfp_mask);
1863 
1864 	return addr;
1865 
1866 fail:
1867 	warn_alloc(gfp_mask, NULL,
1868 			  "vmalloc: allocation failure: %lu bytes", real_size);
1869 	return NULL;
1870 }
1871 
1872 /*
1873  * This is only for performance analysis of vmalloc and for stress testing.
1874  * It is required by the vmalloc test module; do not use it for anything
1875  * else.
1876  */
1877 #ifdef CONFIG_TEST_VMALLOC_MODULE
1878 EXPORT_SYMBOL_GPL(__vmalloc_node_range);
1879 #endif
1880 
1881 /**
1882  * __vmalloc_node - allocate virtually contiguous memory
1883  * @size:	    allocation size
1884  * @align:	    desired alignment
1885  * @gfp_mask:	    flags for the page level allocator
1886  * @prot:	    protection mask for the allocated pages
1887  * @node:	    node to use for allocation or NUMA_NO_NODE
1888  * @caller:	    caller's return address
1889  *
1890  * Allocate enough pages to cover @size from the page level
1891  * allocator with @gfp_mask flags.  Map them into contiguous
1892  * kernel virtual space, using a pagetable protection of @prot.
1893  *
1894  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
1895  * and __GFP_NOFAIL are not supported
1896  *
1897  * Any use of gfp flags outside of GFP_KERNEL should be discussed
1898  * with the mm people first.
1899  *
1900  * Return: pointer to the allocated memory or %NULL on error
1901  */
1902 static void *__vmalloc_node(unsigned long size, unsigned long align,
1903 			    gfp_t gfp_mask, pgprot_t prot,
1904 			    int node, const void *caller)
1905 {
1906 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
1907 				gfp_mask, prot, 0, node, caller);
1908 }
1909 
1910 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1911 {
1912 	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
1913 				__builtin_return_address(0));
1914 }
1915 EXPORT_SYMBOL(__vmalloc);
1916 
1917 static inline void *__vmalloc_node_flags(unsigned long size,
1918 					int node, gfp_t flags)
1919 {
1920 	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
1921 					node, __builtin_return_address(0));
1922 }
1923 
1924 
1925 void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
1926 				  void *caller)
1927 {
1928 	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
1929 }
1930 
1931 /**
1932  * vmalloc - allocate virtually contiguous memory
1933  * @size:    allocation size
1934  *
1935  * Allocate enough pages to cover @size from the page level
1936  * allocator and map them into contiguous kernel virtual space.
1937  *
1938  * For tight control over page level allocator and protection flags
1939  * use __vmalloc() instead.
1940  *
1941  * Return: pointer to the allocated memory or %NULL on error
1942  */
1943 void *vmalloc(unsigned long size)
1944 {
1945 	return __vmalloc_node_flags(size, NUMA_NO_NODE,
1946 				    GFP_KERNEL);
1947 }
1948 EXPORT_SYMBOL(vmalloc);
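
/*
 * Illustrative usage only (not part of the original file): a typical
 * caller allocates a large, virtually contiguous buffer and releases it
 * with vfree().  The structure and names below are made up.
 *
 *	struct foo_ctx {
 *		void *scratch;
 *	};
 *
 *	static int foo_init(struct foo_ctx *ctx)
 *	{
 *		ctx->scratch = vmalloc(SZ_1M);
 *		if (!ctx->scratch)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void foo_exit(struct foo_ctx *ctx)
 *	{
 *		vfree(ctx->scratch);
 *	}
 */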
1949 
1950 /**
1951  * vzalloc - allocate virtually contiguous memory with zero fill
1952  * @size:    allocation size
1953  *
1954  * Allocate enough pages to cover @size from the page level
1955  * allocator and map them into contiguous kernel virtual space.
1956  * The memory allocated is set to zero.
1957  *
1958  * For tight control over page level allocator and protection flags
1959  * use __vmalloc() instead.
1960  *
1961  * Return: pointer to the allocated memory or %NULL on error
1962  */
1963 void *vzalloc(unsigned long size)
1964 {
1965 	return __vmalloc_node_flags(size, NUMA_NO_NODE,
1966 				GFP_KERNEL | __GFP_ZERO);
1967 }
1968 EXPORT_SYMBOL(vzalloc);
1969 
1970 /**
1971  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1972  * @size: allocation size
1973  *
1974  * The resulting memory area is zeroed so it can be mapped to userspace
1975  * without leaking data.
1976  *
1977  * Return: pointer to the allocated memory or %NULL on error
1978  */
1979 void *vmalloc_user(unsigned long size)
1980 {
1981 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
1982 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
1983 				    VM_USERMAP, NUMA_NO_NODE,
1984 				    __builtin_return_address(0));
1985 }
1986 EXPORT_SYMBOL(vmalloc_user);
1987 
1988 /**
1989  * vmalloc_node - allocate memory on a specific node
1990  * @size:	  allocation size
1991  * @node:	  numa node
1992  *
1993  * Allocate enough pages to cover @size from the page level
1994  * allocator and map them into contiguous kernel virtual space.
1995  *
1996  * For tight control over page level allocator and protection flags
1997  * use __vmalloc() instead.
1998  *
1999  * Return: pointer to the allocated memory or %NULL on error
2000  */
2001 void *vmalloc_node(unsigned long size, int node)
2002 {
2003 	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
2004 					node, __builtin_return_address(0));
2005 }
2006 EXPORT_SYMBOL(vmalloc_node);
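
/*
 * Illustrative usage only: when a buffer will mostly be touched from a
 * known NUMA node, allocating it there avoids remote accesses.  The
 * helper below is hypothetical.
 *
 *	static void *alloc_buf_on_node(unsigned long size, int nid)
 *	{
 *		if (nid == NUMA_NO_NODE)
 *			return vmalloc(size);
 *		return vmalloc_node(size, nid);
 *	}
 */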
2007 
2008 /**
2009  * vzalloc_node - allocate memory on a specific node with zero fill
2010  * @size:	allocation size
2011  * @node:	numa node
2012  *
2013  * Allocate enough pages to cover @size from the page level
2014  * allocator and map them into contiguous kernel virtual space.
2015  * The memory allocated is set to zero.
2016  *
2017  * For tight control over page level allocator and protection flags
2018  * use __vmalloc_node() instead.
2019  *
2020  * Return: pointer to the allocated memory or %NULL on error
2021  */
2022 void *vzalloc_node(unsigned long size, int node)
2023 {
2024 	return __vmalloc_node_flags(size, node,
2025 			 GFP_KERNEL | __GFP_ZERO);
2026 }
2027 EXPORT_SYMBOL(vzalloc_node);
2028 
2029 /**
2030  * vmalloc_exec - allocate virtually contiguous, executable memory
2031  * @size:	  allocation size
2032  *
2033  * Kernel-internal function to allocate enough pages to cover @size
2034  * from the page level allocator and map them into contiguous and
2035  * executable kernel virtual space.
2036  *
2037  * For tight control over page level allocator and protection flags
2038  * use __vmalloc() instead.
2039  *
2040  * Return: pointer to the allocated memory or %NULL on error
2041  */
2042 void *vmalloc_exec(unsigned long size)
2043 {
2044 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2045 			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2046 			NUMA_NO_NODE, __builtin_return_address(0));
2047 }
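
/*
 * Illustrative usage only: a kernel-internal loader or JIT could back
 * generated code with vmalloc_exec() and release it with vfree(); the
 * VM_FLUSH_RESET_PERMS flag set above makes vfree() restore the direct
 * map permissions.  "code_size" is a made-up variable.
 *
 *	void *text = vmalloc_exec(code_size);
 *
 *	if (!text)
 *		return -ENOMEM;
 */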
2048 
2049 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2050 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2051 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2052 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
2053 #else
2054 /*
2055  * 64-bit systems should always have either a DMA or a DMA32 zone. For
2056  * others GFP_DMA32 should do the right thing and use the normal zone.
2057  */
2058 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2059 #endif
2060 
2061 /**
2062  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2063  * @size:	allocation size
2064  *
2065  * Allocate enough 32bit PA addressable pages to cover @size from the
2066  * page level allocator and map them into contiguous kernel virtual space.
2067  *
2068  * Return: pointer to the allocated memory or %NULL on error
2069  */
2070 void *vmalloc_32(unsigned long size)
2071 {
2072 	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
2073 			      NUMA_NO_NODE, __builtin_return_address(0));
2074 }
2075 EXPORT_SYMBOL(vmalloc_32);
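
/*
 * Illustrative usage only: drivers that map each backing page
 * individually for DMA to a 32bit-capable device can use vmalloc_32()
 * so every page is guaranteed to be 32bit addressable.  "nbytes" is a
 * made-up variable.
 *
 *	void *buf = vmalloc_32(nbytes);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */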
2076 
2077 /**
2078  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
2079  * @size:	     allocation size
2080  *
2081  * The resulting memory area is 32bit addressable and zeroed so it can be
2082  * mapped to userspace without leaking data.
2083  *
2084  * Return: pointer to the allocated memory or %NULL on error
2085  */
2086 void *vmalloc_32_user(unsigned long size)
2087 {
2088 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2089 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2090 				    VM_USERMAP, NUMA_NO_NODE,
2091 				    __builtin_return_address(0));
2092 }
2093 EXPORT_SYMBOL(vmalloc_32_user);
2094 
2095 /*
2096  * Small helper routine to copy contents from addr to buf.
2097  * If a page is not present, the corresponding bytes are zero-filled.
2098  */
2099 
2100 static int aligned_vread(char *buf, char *addr, unsigned long count)
2101 {
2102 	struct page *p;
2103 	int copied = 0;
2104 
2105 	while (count) {
2106 		unsigned long offset, length;
2107 
2108 		offset = offset_in_page(addr);
2109 		length = PAGE_SIZE - offset;
2110 		if (length > count)
2111 			length = count;
2112 		p = vmalloc_to_page(addr);
2113 		/*
2114 		 * To access this _mapped_ area safely we would need a
2115 		 * lock. But taking a lock here would add overhead to the
2116 		 * vmalloc()/vfree() calls for the sake of this rarely
2117 		 * used _debug_ interface. Instead, we use kmap() and
2118 		 * accept a small overhead in this access function.
2119 		 */
2120 		if (p) {
2121 			/*
2122 			 * we can expect USER0 is not used (see vread/vwrite's
2123 			 * function description)
2124 			 */
2125 			void *map = kmap_atomic(p);
2126 			memcpy(buf, map + offset, length);
2127 			kunmap_atomic(map);
2128 		} else
2129 			memset(buf, 0, length);
2130 
2131 		addr += length;
2132 		buf += length;
2133 		copied += length;
2134 		count -= length;
2135 	}
2136 	return copied;
2137 }
2138 
2139 static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2140 {
2141 	struct page *p;
2142 	int copied = 0;
2143 
2144 	while (count) {
2145 		unsigned long offset, length;
2146 
2147 		offset = offset_in_page(addr);
2148 		length = PAGE_SIZE - offset;
2149 		if (length > count)
2150 			length = count;
2151 		p = vmalloc_to_page(addr);
2152 		/*
2153 		 * To access this _mapped_ area safely we would need a
2154 		 * lock. But taking a lock here would add overhead to the
2155 		 * vmalloc()/vfree() calls for the sake of this rarely
2156 		 * used _debug_ interface. Instead, we use kmap() and
2157 		 * accept a small overhead in this access function.
2158 		 */
2159 		if (p) {
2160 			/*
2161 			 * we can expect USER0 is not used (see vread/vwrite's
2162 			 * function description)
2163 			 */
2164 			void *map = kmap_atomic(p);
2165 			memcpy(map + offset, buf, length);
2166 			kunmap_atomic(map);
2167 		}
2168 		addr += length;
2169 		buf += length;
2170 		copied += length;
2171 		count -= length;
2172 	}
2173 	return copied;
2174 }
2175 
2176 /**
2177  * vread() - read vmalloc area in a safe way.
2178  * @buf:     buffer for reading data
2179  * @addr:    vm address.
2180  * @count:   number of bytes to be read.
2181  *
2182  * This function checks that addr is a valid vmalloc'ed area and
2183  * copies data from that area to the given buffer. If the memory range
2184  * [addr...addr+count) includes some valid address, data is copied to
2185  * the proper area of @buf. Memory holes are zero-filled.
2186  * An IOREMAP area is treated as a memory hole and no copy is done.
2187  *
2188  * If [addr...addr+count) doesn't intersect any live vm_struct
2189  * area, 0 is returned. @buf should be a kernel buffer.
2190  *
2191  * Note: In normal operation vread() is never necessary, because the
2192  * caller should know that the vmalloc() area is valid and can use
2193  * memcpy(). This is for routines which have to access the vmalloc area
2194  * without any information about it, such as /dev/kmem.
2195  *
2196  * Return: number of bytes for which addr and buf should be increased
2197  * (same number as @count) or %0 if [addr...addr+count) doesn't
2198  * include any intersection with valid vmalloc area
2199  */
2200 long vread(char *buf, char *addr, unsigned long count)
2201 {
2202 	struct vmap_area *va;
2203 	struct vm_struct *vm;
2204 	char *vaddr, *buf_start = buf;
2205 	unsigned long buflen = count;
2206 	unsigned long n;
2207 
2208 	/* Don't allow overflow */
2209 	if ((unsigned long) addr + count < count)
2210 		count = -(unsigned long) addr;
2211 
2212 	spin_lock(&vmap_area_lock);
2213 	list_for_each_entry(va, &vmap_area_list, list) {
2214 		if (!count)
2215 			break;
2216 
2217 		if (!(va->flags & VM_VM_AREA))
2218 			continue;
2219 
2220 		vm = va->vm;
2221 		vaddr = (char *) vm->addr;
2222 		if (addr >= vaddr + get_vm_area_size(vm))
2223 			continue;
2224 		while (addr < vaddr) {
2225 			if (count == 0)
2226 				goto finished;
2227 			*buf = '\0';
2228 			buf++;
2229 			addr++;
2230 			count--;
2231 		}
2232 		n = vaddr + get_vm_area_size(vm) - addr;
2233 		if (n > count)
2234 			n = count;
2235 		if (!(vm->flags & VM_IOREMAP))
2236 			aligned_vread(buf, addr, n);
2237 		else /* IOREMAP area is treated as memory hole */
2238 			memset(buf, 0, n);
2239 		buf += n;
2240 		addr += n;
2241 		count -= n;
2242 	}
2243 finished:
2244 	spin_unlock(&vmap_area_lock);
2245 
2246 	if (buf == buf_start)
2247 		return 0;
2248 	/* zero-fill memory holes */
2249 	if (buf != buf_start + buflen)
2250 		memset(buf, 0, buflen - (buf - buf_start));
2251 
2252 	return buflen;
2253 }
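
/*
 * Illustrative usage only: a debugging path that snapshots a vmalloc
 * range into a freshly allocated kernel buffer.  "vaddr" and "len" are
 * made-up names for the target range.
 *
 *	char *kbuf = kzalloc(len, GFP_KERNEL);
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	if (!vread(kbuf, vaddr, len)) {
 *		kfree(kbuf);
 *		return -ENXIO;
 *	}
 */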
2254 
2255 /**
2256  * vwrite() - write vmalloc area in a safe way.
2257  * @buf:      buffer for source data
2258  * @addr:     vm address.
2259  * @count:    number of bytes to be written.
2260  *
2261  * This function checks that addr is a valid vmalloc'ed area and
2262  * copies data from the buffer to that area. If the memory range
2263  * [addr...addr+count) includes some valid address, data is copied from
2264  * the proper area of @buf. Nothing is copied into memory holes.
2265  * An IOREMAP area is treated as a memory hole and no copy is done.
2266  *
2267  * If [addr...addr+count) doesn't intersect any live vm_struct
2268  * area, 0 is returned. @buf should be a kernel buffer.
2269  *
2270  * Note: In normal operation vwrite() is never necessary, because the
2271  * caller should know that the vmalloc() area is valid and can use
2272  * memcpy(). This is for routines which have to access the vmalloc area
2273  * without any information about it, such as /dev/kmem.
2274  *
2275  * Return: number of bytes for which addr and buf should be
2276  * increased (same number as @count) or %0 if [addr...addr+count)
2277  * doesn't include any intersection with valid vmalloc area
2278  */
2279 long vwrite(char *buf, char *addr, unsigned long count)
2280 {
2281 	struct vmap_area *va;
2282 	struct vm_struct *vm;
2283 	char *vaddr;
2284 	unsigned long n, buflen;
2285 	int copied = 0;
2286 
2287 	/* Don't allow overflow */
2288 	if ((unsigned long) addr + count < count)
2289 		count = -(unsigned long) addr;
2290 	buflen = count;
2291 
2292 	spin_lock(&vmap_area_lock);
2293 	list_for_each_entry(va, &vmap_area_list, list) {
2294 		if (!count)
2295 			break;
2296 
2297 		if (!(va->flags & VM_VM_AREA))
2298 			continue;
2299 
2300 		vm = va->vm;
2301 		vaddr = (char *) vm->addr;
2302 		if (addr >= vaddr + get_vm_area_size(vm))
2303 			continue;
2304 		while (addr < vaddr) {
2305 			if (count == 0)
2306 				goto finished;
2307 			buf++;
2308 			addr++;
2309 			count--;
2310 		}
2311 		n = vaddr + get_vm_area_size(vm) - addr;
2312 		if (n > count)
2313 			n = count;
2314 		if (!(vm->flags & VM_IOREMAP)) {
2315 			aligned_vwrite(buf, addr, n);
2316 			copied++;
2317 		}
2318 		buf += n;
2319 		addr += n;
2320 		count -= n;
2321 	}
2322 finished:
2323 	spin_unlock(&vmap_area_lock);
2324 	if (!copied)
2325 		return 0;
2326 	return buflen;
2327 }
2328 
2329 /**
2330  * remap_vmalloc_range_partial - map vmalloc pages to userspace
2331  * @vma:		vma to cover
2332  * @uaddr:		target user address to start at
2333  * @kaddr:		virtual address of vmalloc kernel memory
2334  * @size:		size of map area
2335  *
2336  * Returns:	0 for success, -Exxx on failure
2337  *
2338  * This function checks that @kaddr is a valid vmalloc'ed area,
2339  * and that it is big enough to cover the range starting at
2340  * @uaddr in @vma. It returns failure if these criteria aren't
2341  * met.
2342  *
2343  * Similar to remap_pfn_range() (see mm/memory.c)
2344  */
2345 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2346 				void *kaddr, unsigned long size)
2347 {
2348 	struct vm_struct *area;
2349 
2350 	size = PAGE_ALIGN(size);
2351 
2352 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2353 		return -EINVAL;
2354 
2355 	area = find_vm_area(kaddr);
2356 	if (!area)
2357 		return -EINVAL;
2358 
2359 	if (!(area->flags & VM_USERMAP))
2360 		return -EINVAL;
2361 
2362 	if (kaddr + size > area->addr + get_vm_area_size(area))
2363 		return -EINVAL;
2364 
2365 	do {
2366 		struct page *page = vmalloc_to_page(kaddr);
2367 		int ret;
2368 
2369 		ret = vm_insert_page(vma, uaddr, page);
2370 		if (ret)
2371 			return ret;
2372 
2373 		uaddr += PAGE_SIZE;
2374 		kaddr += PAGE_SIZE;
2375 		size -= PAGE_SIZE;
2376 	} while (size > 0);
2377 
2378 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2379 
2380 	return 0;
2381 }
2382 EXPORT_SYMBOL(remap_vmalloc_range_partial);
2383 
2384 /**
2385  * remap_vmalloc_range - map vmalloc pages to userspace
2386  * @vma:		vma to cover (map full range of vma)
2387  * @addr:		vmalloc memory
2388  * @pgoff:		number of pages into addr before first page to map
2389  *
2390  * Returns:	0 for success, -Exxx on failure
2391  *
2392  * This function checks that addr is a valid vmalloc'ed area, and
2393  * that it is big enough to cover the vma. It returns failure if
2394  * these criteria aren't met.
2395  *
2396  * Similar to remap_pfn_range() (see mm/memory.c)
2397  */
2398 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2399 						unsigned long pgoff)
2400 {
2401 	return remap_vmalloc_range_partial(vma, vma->vm_start,
2402 					   addr + (pgoff << PAGE_SHIFT),
2403 					   vma->vm_end - vma->vm_start);
2404 }
2405 EXPORT_SYMBOL(remap_vmalloc_range);
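
/*
 * Illustrative usage only: the usual pairing is vmalloc_user() in the
 * allocation path (it zeroes the area and sets VM_USERMAP) and
 * remap_vmalloc_range() in the driver's ->mmap() handler.  The names
 * below are made up.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->shared_buf,
 *					   vma->vm_pgoff);
 *	}
 */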
2406 
2407 /*
2408  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
2409  * have one.
2410  */
2411 void __weak vmalloc_sync_all(void)
2412 {
2413 }
2414 
2415 
2416 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
2417 {
2418 	pte_t ***p = data;
2419 
2420 	if (p) {
2421 		*(*p) = pte;
2422 		(*p)++;
2423 	}
2424 	return 0;
2425 }
2426 
2427 /**
2428  * alloc_vm_area - allocate a range of kernel address space
2429  * @size:	   size of the area
2430  * @ptes:	   returns the PTEs for the address space
2431  *
2432  * Returns:	NULL on failure, vm_struct on success
2433  *
2434  * This function reserves a range of kernel address space, and
2435  * allocates pagetables to map that range.  No actual mappings
2436  * are created.
2437  *
2438  * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
2439  * allocated for the VM area are returned.
2440  */
2441 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
2442 {
2443 	struct vm_struct *area;
2444 
2445 	area = get_vm_area_caller(size, VM_IOREMAP,
2446 				__builtin_return_address(0));
2447 	if (area == NULL)
2448 		return NULL;
2449 
2450 	/*
2451 	 * This ensures that page tables are constructed for this region
2452 	 * of kernel virtual address space and mapped into init_mm.
2453 	 */
2454 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2455 				size, f, ptes ? &ptes : NULL)) {
2456 		free_vm_area(area);
2457 		return NULL;
2458 	}
2459 
2460 	return area;
2461 }
2462 EXPORT_SYMBOL_GPL(alloc_vm_area);
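
/*
 * Illustrative usage only: a Xen-style caller reserves address space
 * here and later points the returned PTEs at foreign pages itself.
 * "NR_MAP_PAGES" is a made-up constant; one PTE pointer is returned
 * per page of the area.
 *
 *	pte_t *ptes[NR_MAP_PAGES];
 *	struct vm_struct *area;
 *
 *	area = alloc_vm_area(NR_MAP_PAGES * PAGE_SIZE, ptes);
 *	if (!area)
 *		return -ENOMEM;
 *
 * The area is torn down again with free_vm_area().
 */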
2463 
2464 void free_vm_area(struct vm_struct *area)
2465 {
2466 	struct vm_struct *ret;
2467 	ret = remove_vm_area(area->addr);
2468 	BUG_ON(ret != area);
2469 	kfree(area);
2470 }
2471 EXPORT_SYMBOL_GPL(free_vm_area);
2472 
2473 #ifdef CONFIG_SMP
2474 static struct vmap_area *node_to_va(struct rb_node *n)
2475 {
2476 	return rb_entry_safe(n, struct vmap_area, rb_node);
2477 }
2478 
2479 /**
2480  * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
2481  * @end: target address
2482  * @pnext: out arg for the next vmap_area
2483  * @pprev: out arg for the previous vmap_area
2484  *
2485  * Returns: %true if either or both of next and prev are found,
2486  *	    %false if no vmap_area exists
2487  *
2488  * Find the vmap_areas whose end addresses enclose @end, i.e. if not
2489  * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
2490  */
2491 static bool pvm_find_next_prev(unsigned long end,
2492 			       struct vmap_area **pnext,
2493 			       struct vmap_area **pprev)
2494 {
2495 	struct rb_node *n = vmap_area_root.rb_node;
2496 	struct vmap_area *va = NULL;
2497 
2498 	while (n) {
2499 		va = rb_entry(n, struct vmap_area, rb_node);
2500 		if (end < va->va_end)
2501 			n = n->rb_left;
2502 		else if (end > va->va_end)
2503 			n = n->rb_right;
2504 		else
2505 			break;
2506 	}
2507 
2508 	if (!va)
2509 		return false;
2510 
2511 	if (va->va_end > end) {
2512 		*pnext = va;
2513 		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2514 	} else {
2515 		*pprev = va;
2516 		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
2517 	}
2518 	return true;
2519 }
2520 
2521 /**
2522  * pvm_determine_end - find the highest aligned address between two vmap_areas
2523  * @pnext: in/out arg for the next vmap_area
2524  * @pprev: in/out arg for the previous vmap_area
2525  * @align: alignment
2526  *
2527  * Returns: determined end address
2528  *
2529  * Find the highest aligned address between *@pnext and *@pprev below
2530  * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned
2531  * down address is between the end addresses of the two vmap_areas.
2532  *
2533  * Please note that the address returned by this function may fall
2534  * inside *@pnext vmap_area.  The caller is responsible for checking
2535  * that.
2536  */
2537 static unsigned long pvm_determine_end(struct vmap_area **pnext,
2538 				       struct vmap_area **pprev,
2539 				       unsigned long align)
2540 {
2541 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2542 	unsigned long addr;
2543 
2544 	if (*pnext)
2545 		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2546 	else
2547 		addr = vmalloc_end;
2548 
2549 	while (*pprev && (*pprev)->va_end > addr) {
2550 		*pnext = *pprev;
2551 		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2552 	}
2553 
2554 	return addr;
2555 }
2556 
2557 /**
2558  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
2559  * @offsets: array containing offset of each area
2560  * @sizes: array containing size of each area
2561  * @nr_vms: the number of areas to allocate
2562  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
2563  *
2564  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
2565  *	    vm_structs on success, %NULL on failure
2566  *
2567  * Percpu allocator wants to use congruent vm areas so that it can
2568  * maintain the offsets among percpu areas.  This function allocates
2569  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
2570  * be scattered pretty far apart, with the distance between two areas
2571  * easily going up to gigabytes.  To avoid interacting with regular
2572  * vmallocs, these areas are allocated from the top.
2573  *
2574  * Despite its complicated look, this allocator is rather simple.  It
2575  * does everything top-down and scans areas from the end looking for
2576  * a matching slot.  While scanning, if any of the areas overlaps with
2577  * an existing vmap_area, the base address is pulled down to fit the
2578  * area.  Scanning is repeated till all the areas fit and then all
2579  * necessary data structures are inserted and the result is returned.
2580  */
2581 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2582 				     const size_t *sizes, int nr_vms,
2583 				     size_t align)
2584 {
2585 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2586 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2587 	struct vmap_area **vas, *prev, *next;
2588 	struct vm_struct **vms;
2589 	int area, area2, last_area, term_area;
2590 	unsigned long base, start, end, last_end;
2591 	bool purged = false;
2592 
2593 	/* verify parameters and allocate data structures */
2594 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
2595 	for (last_area = 0, area = 0; area < nr_vms; area++) {
2596 		start = offsets[area];
2597 		end = start + sizes[area];
2598 
2599 		/* is everything aligned properly? */
2600 		BUG_ON(!IS_ALIGNED(offsets[area], align));
2601 		BUG_ON(!IS_ALIGNED(sizes[area], align));
2602 
2603 		/* detect the area with the highest address */
2604 		if (start > offsets[last_area])
2605 			last_area = area;
2606 
2607 		for (area2 = area + 1; area2 < nr_vms; area2++) {
2608 			unsigned long start2 = offsets[area2];
2609 			unsigned long end2 = start2 + sizes[area2];
2610 
2611 			BUG_ON(start2 < end && start < end2);
2612 		}
2613 	}
2614 	last_end = offsets[last_area] + sizes[last_area];
2615 
2616 	if (vmalloc_end - vmalloc_start < last_end) {
2617 		WARN_ON(true);
2618 		return NULL;
2619 	}
2620 
2621 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
2622 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
2623 	if (!vas || !vms)
2624 		goto err_free2;
2625 
2626 	for (area = 0; area < nr_vms; area++) {
2627 		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
2628 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
2629 		if (!vas[area] || !vms[area])
2630 			goto err_free;
2631 	}
2632 retry:
2633 	spin_lock(&vmap_area_lock);
2634 
2635 	/* start scanning - we scan from the top, begin with the last area */
2636 	area = term_area = last_area;
2637 	start = offsets[area];
2638 	end = start + sizes[area];
2639 
2640 	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2641 		base = vmalloc_end - last_end;
2642 		goto found;
2643 	}
2644 	base = pvm_determine_end(&next, &prev, align) - end;
2645 
2646 	while (true) {
2647 		BUG_ON(next && next->va_end <= base + end);
2648 		BUG_ON(prev && prev->va_end > base + end);
2649 
2650 		/*
2651 		 * base might have underflowed, add last_end before
2652 		 * comparing.
2653 		 */
2654 		if (base + last_end < vmalloc_start + last_end) {
2655 			spin_unlock(&vmap_area_lock);
2656 			if (!purged) {
2657 				purge_vmap_area_lazy();
2658 				purged = true;
2659 				goto retry;
2660 			}
2661 			goto err_free;
2662 		}
2663 
2664 		/*
2665 		 * If next overlaps, move base downwards so that it's
2666 		 * right below next and then recheck.
2667 		 */
2668 		if (next && next->va_start < base + end) {
2669 			base = pvm_determine_end(&next, &prev, align) - end;
2670 			term_area = area;
2671 			continue;
2672 		}
2673 
2674 		/*
2675 		 * If prev overlaps, shift down next and prev and move
2676 		 * base so that it's right below new next and then
2677 		 * recheck.
2678 		 */
2679 		if (prev && prev->va_end > base + start)  {
2680 			next = prev;
2681 			prev = node_to_va(rb_prev(&next->rb_node));
2682 			base = pvm_determine_end(&next, &prev, align) - end;
2683 			term_area = area;
2684 			continue;
2685 		}
2686 
2687 		/*
2688 		 * This area fits, move on to the previous one.  If
2689 		 * the previous one is the terminal one, we're done.
2690 		 */
2691 		area = (area + nr_vms - 1) % nr_vms;
2692 		if (area == term_area)
2693 			break;
2694 		start = offsets[area];
2695 		end = start + sizes[area];
2696 		pvm_find_next_prev(base + end, &next, &prev);
2697 	}
2698 found:
2699 	/* we've found a fitting base, insert all va's */
2700 	for (area = 0; area < nr_vms; area++) {
2701 		struct vmap_area *va = vas[area];
2702 
2703 		va->va_start = base + offsets[area];
2704 		va->va_end = va->va_start + sizes[area];
2705 		__insert_vmap_area(va);
2706 	}
2707 
2708 	vmap_area_pcpu_hole = base + offsets[last_area];
2709 
2710 	spin_unlock(&vmap_area_lock);
2711 
2712 	/* insert all vm's */
2713 	for (area = 0; area < nr_vms; area++)
2714 		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2715 				 pcpu_get_vm_areas);
2716 
2717 	kfree(vas);
2718 	return vms;
2719 
2720 err_free:
2721 	for (area = 0; area < nr_vms; area++) {
2722 		kfree(vas[area]);
2723 		kfree(vms[area]);
2724 	}
2725 err_free2:
2726 	kfree(vas);
2727 	kfree(vms);
2728 	return NULL;
2729 }
2730 
2731 /**
2732  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
2733  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
2734  * @nr_vms: the number of allocated areas
2735  *
2736  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
2737  */
2738 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2739 {
2740 	int i;
2741 
2742 	for (i = 0; i < nr_vms; i++)
2743 		free_vm_area(vms[i]);
2744 	kfree(vms);
2745 }
2746 #endif	/* CONFIG_SMP */
2747 
2748 #ifdef CONFIG_PROC_FS
2749 static void *s_start(struct seq_file *m, loff_t *pos)
2750 	__acquires(&vmap_area_lock)
2751 {
2752 	spin_lock(&vmap_area_lock);
2753 	return seq_list_start(&vmap_area_list, *pos);
2754 }
2755 
2756 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2757 {
2758 	return seq_list_next(p, &vmap_area_list, pos);
2759 }
2760 
2761 static void s_stop(struct seq_file *m, void *p)
2762 	__releases(&vmap_area_lock)
2763 {
2764 	spin_unlock(&vmap_area_lock);
2765 }
2766 
2767 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2768 {
2769 	if (IS_ENABLED(CONFIG_NUMA)) {
2770 		unsigned int nr, *counters = m->private;
2771 
2772 		if (!counters)
2773 			return;
2774 
2775 		if (v->flags & VM_UNINITIALIZED)
2776 			return;
2777 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
2778 		smp_rmb();
2779 
2780 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2781 
2782 		for (nr = 0; nr < v->nr_pages; nr++)
2783 			counters[page_to_nid(v->pages[nr])]++;
2784 
2785 		for_each_node_state(nr, N_HIGH_MEMORY)
2786 			if (counters[nr])
2787 				seq_printf(m, " N%u=%u", nr, counters[nr]);
2788 	}
2789 }
2790 
2791 static int s_show(struct seq_file *m, void *p)
2792 {
2793 	struct vmap_area *va;
2794 	struct vm_struct *v;
2795 
2796 	va = list_entry(p, struct vmap_area, list);
2797 
2798 	/*
2799 	 * s_show can race with remove_vm_area(): !VM_VM_AREA means the
2800 	 * vmap area is being torn down or belongs to a vm_map_ram allocation.
2801 	 */
2802 	if (!(va->flags & VM_VM_AREA)) {
2803 		seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
2804 			(void *)va->va_start, (void *)va->va_end,
2805 			va->va_end - va->va_start,
2806 			va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
2807 
2808 		return 0;
2809 	}
2810 
2811 	v = va->vm;
2812 
2813 	seq_printf(m, "0x%pK-0x%pK %7ld",
2814 		v->addr, v->addr + v->size, v->size);
2815 
2816 	if (v->caller)
2817 		seq_printf(m, " %pS", v->caller);
2818 
2819 	if (v->nr_pages)
2820 		seq_printf(m, " pages=%d", v->nr_pages);
2821 
2822 	if (v->phys_addr)
2823 		seq_printf(m, " phys=%pa", &v->phys_addr);
2824 
2825 	if (v->flags & VM_IOREMAP)
2826 		seq_puts(m, " ioremap");
2827 
2828 	if (v->flags & VM_ALLOC)
2829 		seq_puts(m, " vmalloc");
2830 
2831 	if (v->flags & VM_MAP)
2832 		seq_puts(m, " vmap");
2833 
2834 	if (v->flags & VM_USERMAP)
2835 		seq_puts(m, " user");
2836 
2837 	if (is_vmalloc_addr(v->pages))
2838 		seq_puts(m, " vpages");
2839 
2840 	show_numa_info(m, v);
2841 	seq_putc(m, '\n');
2842 	return 0;
2843 }
2844 
2845 static const struct seq_operations vmalloc_op = {
2846 	.start = s_start,
2847 	.next = s_next,
2848 	.stop = s_stop,
2849 	.show = s_show,
2850 };
2851 
2852 static int __init proc_vmalloc_init(void)
2853 {
2854 	if (IS_ENABLED(CONFIG_NUMA))
2855 		proc_create_seq_private("vmallocinfo", 0400, NULL,
2856 				&vmalloc_op,
2857 				nr_node_ids * sizeof(unsigned int), NULL);
2858 	else
2859 		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
2860 	return 0;
2861 }
2862 module_init(proc_vmalloc_init);
2863 
2864 #endif
2865