xref: /openbmc/linux/mm/vmalloc.c (revision 47010c04)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 1993  Linus Torvalds
4  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
5  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
6  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
7  *  Numa awareness, Christoph Lameter, SGI, June 2005
8  *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
9  */
10 
11 #include <linux/vmalloc.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/sched/signal.h>
16 #include <linux/slab.h>
17 #include <linux/spinlock.h>
18 #include <linux/interrupt.h>
19 #include <linux/proc_fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/set_memory.h>
22 #include <linux/debugobjects.h>
23 #include <linux/kallsyms.h>
24 #include <linux/list.h>
25 #include <linux/notifier.h>
26 #include <linux/rbtree.h>
27 #include <linux/xarray.h>
28 #include <linux/io.h>
29 #include <linux/rcupdate.h>
30 #include <linux/pfn.h>
31 #include <linux/kmemleak.h>
32 #include <linux/atomic.h>
33 #include <linux/compiler.h>
34 #include <linux/memcontrol.h>
35 #include <linux/llist.h>
36 #include <linux/bitops.h>
37 #include <linux/rbtree_augmented.h>
38 #include <linux/overflow.h>
39 #include <linux/pgtable.h>
40 #include <linux/uaccess.h>
41 #include <linux/hugetlb.h>
42 #include <linux/sched/mm.h>
43 #include <asm/tlbflush.h>
44 #include <asm/shmparam.h>
45 
46 #include "internal.h"
47 #include "pgalloc-track.h"
48 
49 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
50 static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
51 
52 static int __init set_nohugeiomap(char *str)
53 {
54 	ioremap_max_page_shift = PAGE_SHIFT;
55 	return 0;
56 }
57 early_param("nohugeiomap", set_nohugeiomap);
58 #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
59 static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
60 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
61 
62 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
63 static bool __ro_after_init vmap_allow_huge = true;
64 
65 static int __init set_nohugevmalloc(char *str)
66 {
67 	vmap_allow_huge = false;
68 	return 0;
69 }
70 early_param("nohugevmalloc", set_nohugevmalloc);
71 #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
72 static const bool vmap_allow_huge = false;
73 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
74 
75 bool is_vmalloc_addr(const void *x)
76 {
77 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
78 
79 	return addr >= VMALLOC_START && addr < VMALLOC_END;
80 }
81 EXPORT_SYMBOL(is_vmalloc_addr);
82 
83 struct vfree_deferred {
84 	struct llist_head list;
85 	struct work_struct wq;
86 };
87 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
88 
89 static void __vunmap(const void *, int);
90 
91 static void free_work(struct work_struct *w)
92 {
93 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
94 	struct llist_node *t, *llnode;
95 
96 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
97 		__vunmap((void *)llnode, 1);
98 }
99 
100 /*** Page table manipulation functions ***/
101 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
102 			phys_addr_t phys_addr, pgprot_t prot,
103 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
104 {
105 	pte_t *pte;
106 	u64 pfn;
107 	unsigned long size = PAGE_SIZE;
108 
109 	pfn = phys_addr >> PAGE_SHIFT;
110 	pte = pte_alloc_kernel_track(pmd, addr, mask);
111 	if (!pte)
112 		return -ENOMEM;
113 	do {
114 		BUG_ON(!pte_none(*pte));
115 
116 #ifdef CONFIG_HUGETLB_PAGE
117 		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
118 		if (size != PAGE_SIZE) {
119 			pte_t entry = pfn_pte(pfn, prot);
120 
121 			entry = arch_make_huge_pte(entry, ilog2(size), 0);
122 			set_huge_pte_at(&init_mm, addr, pte, entry);
123 			pfn += PFN_DOWN(size);
124 			continue;
125 		}
126 #endif
127 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
128 		pfn++;
129 	} while (pte += PFN_DOWN(size), addr += size, addr != end);
130 	*mask |= PGTBL_PTE_MODIFIED;
131 	return 0;
132 }
133 
134 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
135 			phys_addr_t phys_addr, pgprot_t prot,
136 			unsigned int max_page_shift)
137 {
138 	if (max_page_shift < PMD_SHIFT)
139 		return 0;
140 
141 	if (!arch_vmap_pmd_supported(prot))
142 		return 0;
143 
144 	if ((end - addr) != PMD_SIZE)
145 		return 0;
146 
147 	if (!IS_ALIGNED(addr, PMD_SIZE))
148 		return 0;
149 
150 	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
151 		return 0;
152 
153 	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
154 		return 0;
155 
156 	return pmd_set_huge(pmd, phys_addr, prot);
157 }
158 
159 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
160 			phys_addr_t phys_addr, pgprot_t prot,
161 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
162 {
163 	pmd_t *pmd;
164 	unsigned long next;
165 
166 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
167 	if (!pmd)
168 		return -ENOMEM;
169 	do {
170 		next = pmd_addr_end(addr, end);
171 
172 		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
173 					max_page_shift)) {
174 			*mask |= PGTBL_PMD_MODIFIED;
175 			continue;
176 		}
177 
178 		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
179 			return -ENOMEM;
180 	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
181 	return 0;
182 }
183 
184 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
185 			phys_addr_t phys_addr, pgprot_t prot,
186 			unsigned int max_page_shift)
187 {
188 	if (max_page_shift < PUD_SHIFT)
189 		return 0;
190 
191 	if (!arch_vmap_pud_supported(prot))
192 		return 0;
193 
194 	if ((end - addr) != PUD_SIZE)
195 		return 0;
196 
197 	if (!IS_ALIGNED(addr, PUD_SIZE))
198 		return 0;
199 
200 	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
201 		return 0;
202 
203 	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
204 		return 0;
205 
206 	return pud_set_huge(pud, phys_addr, prot);
207 }
208 
209 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
210 			phys_addr_t phys_addr, pgprot_t prot,
211 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
212 {
213 	pud_t *pud;
214 	unsigned long next;
215 
216 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
217 	if (!pud)
218 		return -ENOMEM;
219 	do {
220 		next = pud_addr_end(addr, end);
221 
222 		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
223 					max_page_shift)) {
224 			*mask |= PGTBL_PUD_MODIFIED;
225 			continue;
226 		}
227 
228 		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
229 					max_page_shift, mask))
230 			return -ENOMEM;
231 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
232 	return 0;
233 }
234 
235 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
236 			phys_addr_t phys_addr, pgprot_t prot,
237 			unsigned int max_page_shift)
238 {
239 	if (max_page_shift < P4D_SHIFT)
240 		return 0;
241 
242 	if (!arch_vmap_p4d_supported(prot))
243 		return 0;
244 
245 	if ((end - addr) != P4D_SIZE)
246 		return 0;
247 
248 	if (!IS_ALIGNED(addr, P4D_SIZE))
249 		return 0;
250 
251 	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
252 		return 0;
253 
254 	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
255 		return 0;
256 
257 	return p4d_set_huge(p4d, phys_addr, prot);
258 }
259 
260 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
261 			phys_addr_t phys_addr, pgprot_t prot,
262 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
263 {
264 	p4d_t *p4d;
265 	unsigned long next;
266 
267 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
268 	if (!p4d)
269 		return -ENOMEM;
270 	do {
271 		next = p4d_addr_end(addr, end);
272 
273 		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
274 					max_page_shift)) {
275 			*mask |= PGTBL_P4D_MODIFIED;
276 			continue;
277 		}
278 
279 		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
280 					max_page_shift, mask))
281 			return -ENOMEM;
282 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
283 	return 0;
284 }
285 
286 static int vmap_range_noflush(unsigned long addr, unsigned long end,
287 			phys_addr_t phys_addr, pgprot_t prot,
288 			unsigned int max_page_shift)
289 {
290 	pgd_t *pgd;
291 	unsigned long start;
292 	unsigned long next;
293 	int err;
294 	pgtbl_mod_mask mask = 0;
295 
296 	might_sleep();
297 	BUG_ON(addr >= end);
298 
299 	start = addr;
300 	pgd = pgd_offset_k(addr);
301 	do {
302 		next = pgd_addr_end(addr, end);
303 		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
304 					max_page_shift, &mask);
305 		if (err)
306 			break;
307 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
308 
309 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
310 		arch_sync_kernel_mappings(start, end);
311 
312 	return err;
313 }
314 
315 int ioremap_page_range(unsigned long addr, unsigned long end,
316 		phys_addr_t phys_addr, pgprot_t prot)
317 {
318 	int err;
319 
320 	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
321 				 ioremap_max_page_shift);
322 	flush_cache_vmap(addr, end);
323 	return err;
324 }
325 
326 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
327 			     pgtbl_mod_mask *mask)
328 {
329 	pte_t *pte;
330 
331 	pte = pte_offset_kernel(pmd, addr);
332 	do {
333 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
334 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
335 	} while (pte++, addr += PAGE_SIZE, addr != end);
336 	*mask |= PGTBL_PTE_MODIFIED;
337 }
338 
339 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
340 			     pgtbl_mod_mask *mask)
341 {
342 	pmd_t *pmd;
343 	unsigned long next;
344 	int cleared;
345 
346 	pmd = pmd_offset(pud, addr);
347 	do {
348 		next = pmd_addr_end(addr, end);
349 
350 		cleared = pmd_clear_huge(pmd);
351 		if (cleared || pmd_bad(*pmd))
352 			*mask |= PGTBL_PMD_MODIFIED;
353 
354 		if (cleared)
355 			continue;
356 		if (pmd_none_or_clear_bad(pmd))
357 			continue;
358 		vunmap_pte_range(pmd, addr, next, mask);
359 
360 		cond_resched();
361 	} while (pmd++, addr = next, addr != end);
362 }
363 
364 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
365 			     pgtbl_mod_mask *mask)
366 {
367 	pud_t *pud;
368 	unsigned long next;
369 	int cleared;
370 
371 	pud = pud_offset(p4d, addr);
372 	do {
373 		next = pud_addr_end(addr, end);
374 
375 		cleared = pud_clear_huge(pud);
376 		if (cleared || pud_bad(*pud))
377 			*mask |= PGTBL_PUD_MODIFIED;
378 
379 		if (cleared)
380 			continue;
381 		if (pud_none_or_clear_bad(pud))
382 			continue;
383 		vunmap_pmd_range(pud, addr, next, mask);
384 	} while (pud++, addr = next, addr != end);
385 }
386 
387 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
388 			     pgtbl_mod_mask *mask)
389 {
390 	p4d_t *p4d;
391 	unsigned long next;
392 	int cleared;
393 
394 	p4d = p4d_offset(pgd, addr);
395 	do {
396 		next = p4d_addr_end(addr, end);
397 
398 		cleared = p4d_clear_huge(p4d);
399 		if (cleared || p4d_bad(*p4d))
400 			*mask |= PGTBL_P4D_MODIFIED;
401 
402 		if (cleared)
403 			continue;
404 		if (p4d_none_or_clear_bad(p4d))
405 			continue;
406 		vunmap_pud_range(p4d, addr, next, mask);
407 	} while (p4d++, addr = next, addr != end);
408 }
409 
410 /*
411  * vunmap_range_noflush is similar to vunmap_range, but does not
412  * flush caches or TLBs.
413  *
414  * The caller is responsible for calling flush_cache_vmap() before calling
415  * this function, and flush_tlb_kernel_range after it has returned
416  * successfully (and before the addresses are expected to cause a page fault
417  * or be re-mapped for something else, if TLB flushes are being delayed or
418  * coalesced).
419  *
420  * This is an internal function only. Do not use outside mm/.
421  */
422 void vunmap_range_noflush(unsigned long start, unsigned long end)
423 {
424 	unsigned long next;
425 	pgd_t *pgd;
426 	unsigned long addr = start;
427 	pgtbl_mod_mask mask = 0;
428 
429 	BUG_ON(addr >= end);
430 	pgd = pgd_offset_k(addr);
431 	do {
432 		next = pgd_addr_end(addr, end);
433 		if (pgd_bad(*pgd))
434 			mask |= PGTBL_PGD_MODIFIED;
435 		if (pgd_none_or_clear_bad(pgd))
436 			continue;
437 		vunmap_p4d_range(pgd, addr, next, &mask);
438 	} while (pgd++, addr = next, addr != end);
439 
440 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
441 		arch_sync_kernel_mappings(start, end);
442 }
443 
444 /**
445  * vunmap_range - unmap kernel virtual addresses
446  * @addr: start of the VM area to unmap
447  * @end: end of the VM area to unmap (non-inclusive)
448  *
449  * Clears any present PTEs in the virtual address range, flushes TLBs and
450  * caches. Any subsequent access to the address before it has been re-mapped
451  * is a kernel bug.
452  */
453 void vunmap_range(unsigned long addr, unsigned long end)
454 {
455 	flush_cache_vunmap(addr, end);
456 	vunmap_range_noflush(addr, end);
457 	flush_tlb_kernel_range(addr, end);
458 }
459 
460 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
461 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
462 		pgtbl_mod_mask *mask)
463 {
464 	pte_t *pte;
465 
466 	/*
467 	 * nr is a running index into the array which helps higher level
468 	 * callers keep track of where we're up to.
469 	 */
470 
471 	pte = pte_alloc_kernel_track(pmd, addr, mask);
472 	if (!pte)
473 		return -ENOMEM;
474 	do {
475 		struct page *page = pages[*nr];
476 
477 		if (WARN_ON(!pte_none(*pte)))
478 			return -EBUSY;
479 		if (WARN_ON(!page))
480 			return -ENOMEM;
481 		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
482 			return -EINVAL;
483 
484 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
485 		(*nr)++;
486 	} while (pte++, addr += PAGE_SIZE, addr != end);
487 	*mask |= PGTBL_PTE_MODIFIED;
488 	return 0;
489 }
490 
491 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
492 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
493 		pgtbl_mod_mask *mask)
494 {
495 	pmd_t *pmd;
496 	unsigned long next;
497 
498 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
499 	if (!pmd)
500 		return -ENOMEM;
501 	do {
502 		next = pmd_addr_end(addr, end);
503 		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
504 			return -ENOMEM;
505 	} while (pmd++, addr = next, addr != end);
506 	return 0;
507 }
508 
509 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
510 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
511 		pgtbl_mod_mask *mask)
512 {
513 	pud_t *pud;
514 	unsigned long next;
515 
516 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
517 	if (!pud)
518 		return -ENOMEM;
519 	do {
520 		next = pud_addr_end(addr, end);
521 		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
522 			return -ENOMEM;
523 	} while (pud++, addr = next, addr != end);
524 	return 0;
525 }
526 
527 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
528 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
529 		pgtbl_mod_mask *mask)
530 {
531 	p4d_t *p4d;
532 	unsigned long next;
533 
534 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
535 	if (!p4d)
536 		return -ENOMEM;
537 	do {
538 		next = p4d_addr_end(addr, end);
539 		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
540 			return -ENOMEM;
541 	} while (p4d++, addr = next, addr != end);
542 	return 0;
543 }
544 
545 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
546 		pgprot_t prot, struct page **pages)
547 {
548 	unsigned long start = addr;
549 	pgd_t *pgd;
550 	unsigned long next;
551 	int err = 0;
552 	int nr = 0;
553 	pgtbl_mod_mask mask = 0;
554 
555 	BUG_ON(addr >= end);
556 	pgd = pgd_offset_k(addr);
557 	do {
558 		next = pgd_addr_end(addr, end);
559 		if (pgd_bad(*pgd))
560 			mask |= PGTBL_PGD_MODIFIED;
561 		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
562 		if (err)
563 			return err;
564 	} while (pgd++, addr = next, addr != end);
565 
566 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
567 		arch_sync_kernel_mappings(start, end);
568 
569 	return 0;
570 }
571 
572 /*
573  * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
574  * flush caches.
575  *
576  * The caller is responsible for calling flush_cache_vmap() after this
577  * function returns successfully and before the addresses are accessed.
578  *
579  * This is an internal function only. Do not use outside mm/.
580  */
581 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
582 		pgprot_t prot, struct page **pages, unsigned int page_shift)
583 {
584 	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
585 
586 	WARN_ON(page_shift < PAGE_SHIFT);
587 
588 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
589 			page_shift == PAGE_SHIFT)
590 		return vmap_small_pages_range_noflush(addr, end, prot, pages);
591 
592 	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
593 		int err;
594 
595 		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
596 					__pa(page_address(pages[i])), prot,
597 					page_shift);
598 		if (err)
599 			return err;
600 
601 		addr += 1UL << page_shift;
602 	}
603 
604 	return 0;
605 }
606 
607 /**
608  * vmap_pages_range - map pages to a kernel virtual address
609  * @addr: start of the VM area to map
610  * @end: end of the VM area to map (non-inclusive)
611  * @prot: page protection flags to use
612  * @pages: pages to map (always PAGE_SIZE pages)
613  * @page_shift: maximum shift that the pages may be mapped with, @pages must
614  * be aligned and contiguous up to at least this shift.
615  *
616  * RETURNS:
617  * 0 on success, -errno on failure.
618  */
619 static int vmap_pages_range(unsigned long addr, unsigned long end,
620 		pgprot_t prot, struct page **pages, unsigned int page_shift)
621 {
622 	int err;
623 
624 	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
625 	flush_cache_vmap(addr, end);
626 	return err;
627 }
628 
629 int is_vmalloc_or_module_addr(const void *x)
630 {
631 	/*
632 	 * ARM, x86-64 and sparc64 put modules in a special place,
633 	 * and fall back on vmalloc() if that fails. Others
634 	 * just put it in the vmalloc space.
635 	 */
636 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
637 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
638 	if (addr >= MODULES_VADDR && addr < MODULES_END)
639 		return 1;
640 #endif
641 	return is_vmalloc_addr(x);
642 }
643 
644 /*
645  * Walk a vmap address to the struct page it maps. Huge vmap mappings will
646  * return the tail page that corresponds to the base page address, which
647  * matches small vmap mappings.
648  */
649 struct page *vmalloc_to_page(const void *vmalloc_addr)
650 {
651 	unsigned long addr = (unsigned long) vmalloc_addr;
652 	struct page *page = NULL;
653 	pgd_t *pgd = pgd_offset_k(addr);
654 	p4d_t *p4d;
655 	pud_t *pud;
656 	pmd_t *pmd;
657 	pte_t *ptep, pte;
658 
659 	/*
660 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
661 	 * architectures that do not vmalloc module space
662 	 */
663 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
664 
665 	if (pgd_none(*pgd))
666 		return NULL;
667 	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
668 		return NULL; /* XXX: no allowance for huge pgd */
669 	if (WARN_ON_ONCE(pgd_bad(*pgd)))
670 		return NULL;
671 
672 	p4d = p4d_offset(pgd, addr);
673 	if (p4d_none(*p4d))
674 		return NULL;
675 	if (p4d_leaf(*p4d))
676 		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
677 	if (WARN_ON_ONCE(p4d_bad(*p4d)))
678 		return NULL;
679 
680 	pud = pud_offset(p4d, addr);
681 	if (pud_none(*pud))
682 		return NULL;
683 	if (pud_leaf(*pud))
684 		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
685 	if (WARN_ON_ONCE(pud_bad(*pud)))
686 		return NULL;
687 
688 	pmd = pmd_offset(pud, addr);
689 	if (pmd_none(*pmd))
690 		return NULL;
691 	if (pmd_leaf(*pmd))
692 		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
693 	if (WARN_ON_ONCE(pmd_bad(*pmd)))
694 		return NULL;
695 
696 	ptep = pte_offset_map(pmd, addr);
697 	pte = *ptep;
698 	if (pte_present(pte))
699 		page = pte_page(pte);
700 	pte_unmap(ptep);
701 
702 	return page;
703 }
704 EXPORT_SYMBOL(vmalloc_to_page);
705 
706 /*
707  * Map a vmalloc()-space virtual address to the physical page frame number.
708  */
709 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
710 {
711 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
712 }
713 EXPORT_SYMBOL(vmalloc_to_pfn);
714 
715 
716 /*** Global kva allocator ***/
717 
718 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
719 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
720 
721 
722 static DEFINE_SPINLOCK(vmap_area_lock);
723 static DEFINE_SPINLOCK(free_vmap_area_lock);
724 /* Export for kexec only */
725 LIST_HEAD(vmap_area_list);
726 static struct rb_root vmap_area_root = RB_ROOT;
727 static bool vmap_initialized __read_mostly;
728 
729 static struct rb_root purge_vmap_area_root = RB_ROOT;
730 static LIST_HEAD(purge_vmap_area_list);
731 static DEFINE_SPINLOCK(purge_vmap_area_lock);
732 
733 /*
734  * This kmem_cache is used for vmap_area objects. Instead of
735  * allocating from slab we reuse an object from this cache to
736  * make things faster. Especially in "no edge" splitting of
737  * free block.
738  */
739 static struct kmem_cache *vmap_area_cachep;
740 
741 /*
742  * This linked list is used in pair with free_vmap_area_root.
743  * It gives O(1) access to prev/next to perform fast coalescing.
744  */
745 static LIST_HEAD(free_vmap_area_list);
746 
747 /*
748  * This augment red-black tree represents the free vmap space.
749  * All vmap_area objects in this tree are sorted by va->va_start
750  * address. It is used for allocation and merging when a vmap
751  * object is released.
752  *
753  * Each vmap_area node contains a maximum available free block
754  * of its sub-tree, right or left. Therefore it is possible to
755  * find a lowest match of free area.
756  */
757 static struct rb_root free_vmap_area_root = RB_ROOT;
758 
759 /*
760  * Preload a CPU with one object for "no edge" split case. The
761  * aim is to get rid of allocations from the atomic context, thus
762  * to use more permissive allocation masks.
763  */
764 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
765 
766 static __always_inline unsigned long
767 va_size(struct vmap_area *va)
768 {
769 	return (va->va_end - va->va_start);
770 }
771 
772 static __always_inline unsigned long
773 get_subtree_max_size(struct rb_node *node)
774 {
775 	struct vmap_area *va;
776 
777 	va = rb_entry_safe(node, struct vmap_area, rb_node);
778 	return va ? va->subtree_max_size : 0;
779 }
780 
781 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
782 	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
783 
784 static void purge_vmap_area_lazy(void);
785 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
786 static void drain_vmap_area_work(struct work_struct *work);
787 static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
788 
789 static atomic_long_t nr_vmalloc_pages;
790 
791 unsigned long vmalloc_nr_pages(void)
792 {
793 	return atomic_long_read(&nr_vmalloc_pages);
794 }
795 
796 static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
797 {
798 	struct vmap_area *va = NULL;
799 	struct rb_node *n = vmap_area_root.rb_node;
800 
801 	addr = (unsigned long)kasan_reset_tag((void *)addr);
802 
803 	while (n) {
804 		struct vmap_area *tmp;
805 
806 		tmp = rb_entry(n, struct vmap_area, rb_node);
807 		if (tmp->va_end > addr) {
808 			va = tmp;
809 			if (tmp->va_start <= addr)
810 				break;
811 
812 			n = n->rb_left;
813 		} else
814 			n = n->rb_right;
815 	}
816 
817 	return va;
818 }
819 
820 static struct vmap_area *__find_vmap_area(unsigned long addr)
821 {
822 	struct rb_node *n = vmap_area_root.rb_node;
823 
824 	addr = (unsigned long)kasan_reset_tag((void *)addr);
825 
826 	while (n) {
827 		struct vmap_area *va;
828 
829 		va = rb_entry(n, struct vmap_area, rb_node);
830 		if (addr < va->va_start)
831 			n = n->rb_left;
832 		else if (addr >= va->va_end)
833 			n = n->rb_right;
834 		else
835 			return va;
836 	}
837 
838 	return NULL;
839 }
840 
841 /*
842  * This function returns back addresses of parent node
843  * and its left or right link for further processing.
844  *
845  * Otherwise NULL is returned. In that case all further
846  * steps regarding inserting of conflicting overlap range
847  * have to be declined and actually considered as a bug.
848  */
849 static __always_inline struct rb_node **
850 find_va_links(struct vmap_area *va,
851 	struct rb_root *root, struct rb_node *from,
852 	struct rb_node **parent)
853 {
854 	struct vmap_area *tmp_va;
855 	struct rb_node **link;
856 
857 	if (root) {
858 		link = &root->rb_node;
859 		if (unlikely(!*link)) {
860 			*parent = NULL;
861 			return link;
862 		}
863 	} else {
864 		link = &from;
865 	}
866 
867 	/*
868 	 * Go to the bottom of the tree. When we hit the last point
869 	 * we end up with parent rb_node and correct direction, i name
870 	 * it link, where the new va->rb_node will be attached to.
871 	 */
872 	do {
873 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
874 
875 		/*
876 		 * During the traversal we also do some sanity check.
877 		 * Trigger the BUG() if there are sides(left/right)
878 		 * or full overlaps.
879 		 */
880 		if (va->va_start < tmp_va->va_end &&
881 				va->va_end <= tmp_va->va_start)
882 			link = &(*link)->rb_left;
883 		else if (va->va_end > tmp_va->va_start &&
884 				va->va_start >= tmp_va->va_end)
885 			link = &(*link)->rb_right;
886 		else {
887 			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
888 				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
889 
890 			return NULL;
891 		}
892 	} while (*link);
893 
894 	*parent = &tmp_va->rb_node;
895 	return link;
896 }
897 
898 static __always_inline struct list_head *
899 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
900 {
901 	struct list_head *list;
902 
903 	if (unlikely(!parent))
904 		/*
905 		 * The red-black tree where we try to find VA neighbors
906 		 * before merging or inserting is empty, i.e. it means
907 		 * there is no free vmap space. Normally it does not
908 		 * happen but we handle this case anyway.
909 		 */
910 		return NULL;
911 
912 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
913 	return (&parent->rb_right == link ? list->next : list);
914 }
915 
916 static __always_inline void
917 link_va(struct vmap_area *va, struct rb_root *root,
918 	struct rb_node *parent, struct rb_node **link, struct list_head *head)
919 {
920 	/*
921 	 * VA is still not in the list, but we can
922 	 * identify its future previous list_head node.
923 	 */
924 	if (likely(parent)) {
925 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
926 		if (&parent->rb_right != link)
927 			head = head->prev;
928 	}
929 
930 	/* Insert to the rb-tree */
931 	rb_link_node(&va->rb_node, parent, link);
932 	if (root == &free_vmap_area_root) {
933 		/*
934 		 * Some explanation here. Just perform simple insertion
935 		 * to the tree. We do not set va->subtree_max_size to
936 		 * its current size before calling rb_insert_augmented().
937 		 * It is because of we populate the tree from the bottom
938 		 * to parent levels when the node _is_ in the tree.
939 		 *
940 		 * Therefore we set subtree_max_size to zero after insertion,
941 		 * to let __augment_tree_propagate_from() puts everything to
942 		 * the correct order later on.
943 		 */
944 		rb_insert_augmented(&va->rb_node,
945 			root, &free_vmap_area_rb_augment_cb);
946 		va->subtree_max_size = 0;
947 	} else {
948 		rb_insert_color(&va->rb_node, root);
949 	}
950 
951 	/* Address-sort this list */
952 	list_add(&va->list, head);
953 }
954 
955 static __always_inline void
956 unlink_va(struct vmap_area *va, struct rb_root *root)
957 {
958 	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
959 		return;
960 
961 	if (root == &free_vmap_area_root)
962 		rb_erase_augmented(&va->rb_node,
963 			root, &free_vmap_area_rb_augment_cb);
964 	else
965 		rb_erase(&va->rb_node, root);
966 
967 	list_del(&va->list);
968 	RB_CLEAR_NODE(&va->rb_node);
969 }
970 
971 #if DEBUG_AUGMENT_PROPAGATE_CHECK
972 /*
973  * Gets called when remove the node and rotate.
974  */
975 static __always_inline unsigned long
976 compute_subtree_max_size(struct vmap_area *va)
977 {
978 	return max3(va_size(va),
979 		get_subtree_max_size(va->rb_node.rb_left),
980 		get_subtree_max_size(va->rb_node.rb_right));
981 }
982 
983 static void
984 augment_tree_propagate_check(void)
985 {
986 	struct vmap_area *va;
987 	unsigned long computed_size;
988 
989 	list_for_each_entry(va, &free_vmap_area_list, list) {
990 		computed_size = compute_subtree_max_size(va);
991 		if (computed_size != va->subtree_max_size)
992 			pr_emerg("tree is corrupted: %lu, %lu\n",
993 				va_size(va), va->subtree_max_size);
994 	}
995 }
996 #endif
997 
998 /*
999  * This function populates subtree_max_size from bottom to upper
1000  * levels starting from VA point. The propagation must be done
1001  * when VA size is modified by changing its va_start/va_end. Or
1002  * in case of newly inserting of VA to the tree.
1003  *
1004  * It means that __augment_tree_propagate_from() must be called:
1005  * - After VA has been inserted to the tree(free path);
1006  * - After VA has been shrunk(allocation path);
1007  * - After VA has been increased(merging path).
1008  *
1009  * Please note that, it does not mean that upper parent nodes
1010  * and their subtree_max_size are recalculated all the time up
1011  * to the root node.
1012  *
1013  *       4--8
1014  *        /\
1015  *       /  \
1016  *      /    \
1017  *    2--2  8--8
1018  *
1019  * For example if we modify the node 4, shrinking it to 2, then
1020  * no any modification is required. If we shrink the node 2 to 1
1021  * its subtree_max_size is updated only, and set to 1. If we shrink
1022  * the node 8 to 6, then its subtree_max_size is set to 6 and parent
1023  * node becomes 4--6.
1024  */
1025 static __always_inline void
1026 augment_tree_propagate_from(struct vmap_area *va)
1027 {
1028 	/*
1029 	 * Populate the tree from bottom towards the root until
1030 	 * the calculated maximum available size of checked node
1031 	 * is equal to its current one.
1032 	 */
1033 	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1034 
1035 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1036 	augment_tree_propagate_check();
1037 #endif
1038 }
1039 
1040 static void
1041 insert_vmap_area(struct vmap_area *va,
1042 	struct rb_root *root, struct list_head *head)
1043 {
1044 	struct rb_node **link;
1045 	struct rb_node *parent;
1046 
1047 	link = find_va_links(va, root, NULL, &parent);
1048 	if (link)
1049 		link_va(va, root, parent, link, head);
1050 }
1051 
1052 static void
1053 insert_vmap_area_augment(struct vmap_area *va,
1054 	struct rb_node *from, struct rb_root *root,
1055 	struct list_head *head)
1056 {
1057 	struct rb_node **link;
1058 	struct rb_node *parent;
1059 
1060 	if (from)
1061 		link = find_va_links(va, NULL, from, &parent);
1062 	else
1063 		link = find_va_links(va, root, NULL, &parent);
1064 
1065 	if (link) {
1066 		link_va(va, root, parent, link, head);
1067 		augment_tree_propagate_from(va);
1068 	}
1069 }
1070 
1071 /*
1072  * Merge de-allocated chunk of VA memory with previous
1073  * and next free blocks. If coalesce is not done a new
1074  * free area is inserted. If VA has been merged, it is
1075  * freed.
1076  *
1077  * Please note, it can return NULL in case of overlap
1078  * ranges, followed by WARN() report. Despite it is a
1079  * buggy behaviour, a system can be alive and keep
1080  * ongoing.
1081  */
1082 static __always_inline struct vmap_area *
1083 merge_or_add_vmap_area(struct vmap_area *va,
1084 	struct rb_root *root, struct list_head *head)
1085 {
1086 	struct vmap_area *sibling;
1087 	struct list_head *next;
1088 	struct rb_node **link;
1089 	struct rb_node *parent;
1090 	bool merged = false;
1091 
1092 	/*
1093 	 * Find a place in the tree where VA potentially will be
1094 	 * inserted, unless it is merged with its sibling/siblings.
1095 	 */
1096 	link = find_va_links(va, root, NULL, &parent);
1097 	if (!link)
1098 		return NULL;
1099 
1100 	/*
1101 	 * Get next node of VA to check if merging can be done.
1102 	 */
1103 	next = get_va_next_sibling(parent, link);
1104 	if (unlikely(next == NULL))
1105 		goto insert;
1106 
1107 	/*
1108 	 * start            end
1109 	 * |                |
1110 	 * |<------VA------>|<-----Next----->|
1111 	 *                  |                |
1112 	 *                  start            end
1113 	 */
1114 	if (next != head) {
1115 		sibling = list_entry(next, struct vmap_area, list);
1116 		if (sibling->va_start == va->va_end) {
1117 			sibling->va_start = va->va_start;
1118 
1119 			/* Free vmap_area object. */
1120 			kmem_cache_free(vmap_area_cachep, va);
1121 
1122 			/* Point to the new merged area. */
1123 			va = sibling;
1124 			merged = true;
1125 		}
1126 	}
1127 
1128 	/*
1129 	 * start            end
1130 	 * |                |
1131 	 * |<-----Prev----->|<------VA------>|
1132 	 *                  |                |
1133 	 *                  start            end
1134 	 */
1135 	if (next->prev != head) {
1136 		sibling = list_entry(next->prev, struct vmap_area, list);
1137 		if (sibling->va_end == va->va_start) {
1138 			/*
1139 			 * If both neighbors are coalesced, it is important
1140 			 * to unlink the "next" node first, followed by merging
1141 			 * with "previous" one. Otherwise the tree might not be
1142 			 * fully populated if a sibling's augmented value is
1143 			 * "normalized" because of rotation operations.
1144 			 */
1145 			if (merged)
1146 				unlink_va(va, root);
1147 
1148 			sibling->va_end = va->va_end;
1149 
1150 			/* Free vmap_area object. */
1151 			kmem_cache_free(vmap_area_cachep, va);
1152 
1153 			/* Point to the new merged area. */
1154 			va = sibling;
1155 			merged = true;
1156 		}
1157 	}
1158 
1159 insert:
1160 	if (!merged)
1161 		link_va(va, root, parent, link, head);
1162 
1163 	return va;
1164 }
1165 
1166 static __always_inline struct vmap_area *
1167 merge_or_add_vmap_area_augment(struct vmap_area *va,
1168 	struct rb_root *root, struct list_head *head)
1169 {
1170 	va = merge_or_add_vmap_area(va, root, head);
1171 	if (va)
1172 		augment_tree_propagate_from(va);
1173 
1174 	return va;
1175 }
1176 
1177 static __always_inline bool
1178 is_within_this_va(struct vmap_area *va, unsigned long size,
1179 	unsigned long align, unsigned long vstart)
1180 {
1181 	unsigned long nva_start_addr;
1182 
1183 	if (va->va_start > vstart)
1184 		nva_start_addr = ALIGN(va->va_start, align);
1185 	else
1186 		nva_start_addr = ALIGN(vstart, align);
1187 
1188 	/* Can be overflowed due to big size or alignment. */
1189 	if (nva_start_addr + size < nva_start_addr ||
1190 			nva_start_addr < vstart)
1191 		return false;
1192 
1193 	return (nva_start_addr + size <= va->va_end);
1194 }
1195 
1196 /*
1197  * Find the first free block(lowest start address) in the tree,
1198  * that will accomplish the request corresponding to passing
1199  * parameters. Please note, with an alignment bigger than PAGE_SIZE,
1200  * a search length is adjusted to account for worst case alignment
1201  * overhead.
1202  */
1203 static __always_inline struct vmap_area *
1204 find_vmap_lowest_match(unsigned long size, unsigned long align,
1205 	unsigned long vstart, bool adjust_search_size)
1206 {
1207 	struct vmap_area *va;
1208 	struct rb_node *node;
1209 	unsigned long length;
1210 
1211 	/* Start from the root. */
1212 	node = free_vmap_area_root.rb_node;
1213 
1214 	/* Adjust the search size for alignment overhead. */
1215 	length = adjust_search_size ? size + align - 1 : size;
1216 
1217 	while (node) {
1218 		va = rb_entry(node, struct vmap_area, rb_node);
1219 
1220 		if (get_subtree_max_size(node->rb_left) >= length &&
1221 				vstart < va->va_start) {
1222 			node = node->rb_left;
1223 		} else {
1224 			if (is_within_this_va(va, size, align, vstart))
1225 				return va;
1226 
1227 			/*
1228 			 * Does not make sense to go deeper towards the right
1229 			 * sub-tree if it does not have a free block that is
1230 			 * equal or bigger to the requested search length.
1231 			 */
1232 			if (get_subtree_max_size(node->rb_right) >= length) {
1233 				node = node->rb_right;
1234 				continue;
1235 			}
1236 
1237 			/*
1238 			 * OK. We roll back and find the first right sub-tree,
1239 			 * that will satisfy the search criteria. It can happen
1240 			 * due to "vstart" restriction or an alignment overhead
1241 			 * that is bigger then PAGE_SIZE.
1242 			 */
1243 			while ((node = rb_parent(node))) {
1244 				va = rb_entry(node, struct vmap_area, rb_node);
1245 				if (is_within_this_va(va, size, align, vstart))
1246 					return va;
1247 
1248 				if (get_subtree_max_size(node->rb_right) >= length &&
1249 						vstart <= va->va_start) {
1250 					/*
1251 					 * Shift the vstart forward. Please note, we update it with
1252 					 * parent's start address adding "1" because we do not want
1253 					 * to enter same sub-tree after it has already been checked
1254 					 * and no suitable free block found there.
1255 					 */
1256 					vstart = va->va_start + 1;
1257 					node = node->rb_right;
1258 					break;
1259 				}
1260 			}
1261 		}
1262 	}
1263 
1264 	return NULL;
1265 }
1266 
1267 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1268 #include <linux/random.h>
1269 
1270 static struct vmap_area *
1271 find_vmap_lowest_linear_match(unsigned long size,
1272 	unsigned long align, unsigned long vstart)
1273 {
1274 	struct vmap_area *va;
1275 
1276 	list_for_each_entry(va, &free_vmap_area_list, list) {
1277 		if (!is_within_this_va(va, size, align, vstart))
1278 			continue;
1279 
1280 		return va;
1281 	}
1282 
1283 	return NULL;
1284 }
1285 
1286 static void
1287 find_vmap_lowest_match_check(unsigned long size, unsigned long align)
1288 {
1289 	struct vmap_area *va_1, *va_2;
1290 	unsigned long vstart;
1291 	unsigned int rnd;
1292 
1293 	get_random_bytes(&rnd, sizeof(rnd));
1294 	vstart = VMALLOC_START + rnd;
1295 
1296 	va_1 = find_vmap_lowest_match(size, align, vstart, false);
1297 	va_2 = find_vmap_lowest_linear_match(size, align, vstart);
1298 
1299 	if (va_1 != va_2)
1300 		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1301 			va_1, va_2, vstart);
1302 }
1303 #endif
1304 
1305 enum fit_type {
1306 	NOTHING_FIT = 0,
1307 	FL_FIT_TYPE = 1,	/* full fit */
1308 	LE_FIT_TYPE = 2,	/* left edge fit */
1309 	RE_FIT_TYPE = 3,	/* right edge fit */
1310 	NE_FIT_TYPE = 4		/* no edge fit */
1311 };
1312 
1313 static __always_inline enum fit_type
1314 classify_va_fit_type(struct vmap_area *va,
1315 	unsigned long nva_start_addr, unsigned long size)
1316 {
1317 	enum fit_type type;
1318 
1319 	/* Check if it is within VA. */
1320 	if (nva_start_addr < va->va_start ||
1321 			nva_start_addr + size > va->va_end)
1322 		return NOTHING_FIT;
1323 
1324 	/* Now classify. */
1325 	if (va->va_start == nva_start_addr) {
1326 		if (va->va_end == nva_start_addr + size)
1327 			type = FL_FIT_TYPE;
1328 		else
1329 			type = LE_FIT_TYPE;
1330 	} else if (va->va_end == nva_start_addr + size) {
1331 		type = RE_FIT_TYPE;
1332 	} else {
1333 		type = NE_FIT_TYPE;
1334 	}
1335 
1336 	return type;
1337 }
1338 
1339 static __always_inline int
1340 adjust_va_to_fit_type(struct vmap_area *va,
1341 	unsigned long nva_start_addr, unsigned long size,
1342 	enum fit_type type)
1343 {
1344 	struct vmap_area *lva = NULL;
1345 
1346 	if (type == FL_FIT_TYPE) {
1347 		/*
1348 		 * No need to split VA, it fully fits.
1349 		 *
1350 		 * |               |
1351 		 * V      NVA      V
1352 		 * |---------------|
1353 		 */
1354 		unlink_va(va, &free_vmap_area_root);
1355 		kmem_cache_free(vmap_area_cachep, va);
1356 	} else if (type == LE_FIT_TYPE) {
1357 		/*
1358 		 * Split left edge of fit VA.
1359 		 *
1360 		 * |       |
1361 		 * V  NVA  V   R
1362 		 * |-------|-------|
1363 		 */
1364 		va->va_start += size;
1365 	} else if (type == RE_FIT_TYPE) {
1366 		/*
1367 		 * Split right edge of fit VA.
1368 		 *
1369 		 *         |       |
1370 		 *     L   V  NVA  V
1371 		 * |-------|-------|
1372 		 */
1373 		va->va_end = nva_start_addr;
1374 	} else if (type == NE_FIT_TYPE) {
1375 		/*
1376 		 * Split no edge of fit VA.
1377 		 *
1378 		 *     |       |
1379 		 *   L V  NVA  V R
1380 		 * |---|-------|---|
1381 		 */
1382 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1383 		if (unlikely(!lva)) {
1384 			/*
1385 			 * For percpu allocator we do not do any pre-allocation
1386 			 * and leave it as it is. The reason is it most likely
1387 			 * never ends up with NE_FIT_TYPE splitting. In case of
1388 			 * percpu allocations offsets and sizes are aligned to
1389 			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1390 			 * are its main fitting cases.
1391 			 *
1392 			 * There are a few exceptions though, as an example it is
1393 			 * a first allocation (early boot up) when we have "one"
1394 			 * big free space that has to be split.
1395 			 *
1396 			 * Also we can hit this path in case of regular "vmap"
1397 			 * allocations, if "this" current CPU was not preloaded.
1398 			 * See the comment in alloc_vmap_area() why. If so, then
1399 			 * GFP_NOWAIT is used instead to get an extra object for
1400 			 * split purpose. That is rare and most time does not
1401 			 * occur.
1402 			 *
1403 			 * What happens if an allocation gets failed. Basically,
1404 			 * an "overflow" path is triggered to purge lazily freed
1405 			 * areas to free some memory, then, the "retry" path is
1406 			 * triggered to repeat one more time. See more details
1407 			 * in alloc_vmap_area() function.
1408 			 */
1409 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1410 			if (!lva)
1411 				return -1;
1412 		}
1413 
1414 		/*
1415 		 * Build the remainder.
1416 		 */
1417 		lva->va_start = va->va_start;
1418 		lva->va_end = nva_start_addr;
1419 
1420 		/*
1421 		 * Shrink this VA to remaining size.
1422 		 */
1423 		va->va_start = nva_start_addr + size;
1424 	} else {
1425 		return -1;
1426 	}
1427 
1428 	if (type != FL_FIT_TYPE) {
1429 		augment_tree_propagate_from(va);
1430 
1431 		if (lva)	/* type == NE_FIT_TYPE */
1432 			insert_vmap_area_augment(lva, &va->rb_node,
1433 				&free_vmap_area_root, &free_vmap_area_list);
1434 	}
1435 
1436 	return 0;
1437 }
1438 
1439 /*
1440  * Returns a start address of the newly allocated area, if success.
1441  * Otherwise a vend is returned that indicates failure.
1442  */
1443 static __always_inline unsigned long
1444 __alloc_vmap_area(unsigned long size, unsigned long align,
1445 	unsigned long vstart, unsigned long vend)
1446 {
1447 	bool adjust_search_size = true;
1448 	unsigned long nva_start_addr;
1449 	struct vmap_area *va;
1450 	enum fit_type type;
1451 	int ret;
1452 
1453 	/*
1454 	 * Do not adjust when:
1455 	 *   a) align <= PAGE_SIZE, because it does not make any sense.
1456 	 *      All blocks(their start addresses) are at least PAGE_SIZE
1457 	 *      aligned anyway;
1458 	 *   b) a short range where a requested size corresponds to exactly
1459 	 *      specified [vstart:vend] interval and an alignment > PAGE_SIZE.
1460 	 *      With adjusted search length an allocation would not succeed.
1461 	 */
1462 	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
1463 		adjust_search_size = false;
1464 
1465 	va = find_vmap_lowest_match(size, align, vstart, adjust_search_size);
1466 	if (unlikely(!va))
1467 		return vend;
1468 
1469 	if (va->va_start > vstart)
1470 		nva_start_addr = ALIGN(va->va_start, align);
1471 	else
1472 		nva_start_addr = ALIGN(vstart, align);
1473 
1474 	/* Check the "vend" restriction. */
1475 	if (nva_start_addr + size > vend)
1476 		return vend;
1477 
1478 	/* Classify what we have found. */
1479 	type = classify_va_fit_type(va, nva_start_addr, size);
1480 	if (WARN_ON_ONCE(type == NOTHING_FIT))
1481 		return vend;
1482 
1483 	/* Update the free vmap_area. */
1484 	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1485 	if (ret)
1486 		return vend;
1487 
1488 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1489 	find_vmap_lowest_match_check(size, align);
1490 #endif
1491 
1492 	return nva_start_addr;
1493 }
1494 
1495 /*
1496  * Free a region of KVA allocated by alloc_vmap_area
1497  */
1498 static void free_vmap_area(struct vmap_area *va)
1499 {
1500 	/*
1501 	 * Remove from the busy tree/list.
1502 	 */
1503 	spin_lock(&vmap_area_lock);
1504 	unlink_va(va, &vmap_area_root);
1505 	spin_unlock(&vmap_area_lock);
1506 
1507 	/*
1508 	 * Insert/Merge it back to the free tree/list.
1509 	 */
1510 	spin_lock(&free_vmap_area_lock);
1511 	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1512 	spin_unlock(&free_vmap_area_lock);
1513 }
1514 
1515 static inline void
1516 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1517 {
1518 	struct vmap_area *va = NULL;
1519 
1520 	/*
1521 	 * Preload this CPU with one extra vmap_area object. It is used
1522 	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
1523 	 * a CPU that does an allocation is preloaded.
1524 	 *
1525 	 * We do it in non-atomic context, thus it allows us to use more
1526 	 * permissive allocation masks to be more stable under low memory
1527 	 * condition and high memory pressure.
1528 	 */
1529 	if (!this_cpu_read(ne_fit_preload_node))
1530 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1531 
1532 	spin_lock(lock);
1533 
1534 	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1535 		kmem_cache_free(vmap_area_cachep, va);
1536 }
1537 
1538 /*
1539  * Allocate a region of KVA of the specified size and alignment, within the
1540  * vstart and vend.
1541  */
1542 static struct vmap_area *alloc_vmap_area(unsigned long size,
1543 				unsigned long align,
1544 				unsigned long vstart, unsigned long vend,
1545 				int node, gfp_t gfp_mask)
1546 {
1547 	struct vmap_area *va;
1548 	unsigned long freed;
1549 	unsigned long addr;
1550 	int purged = 0;
1551 	int ret;
1552 
1553 	BUG_ON(!size);
1554 	BUG_ON(offset_in_page(size));
1555 	BUG_ON(!is_power_of_2(align));
1556 
1557 	if (unlikely(!vmap_initialized))
1558 		return ERR_PTR(-EBUSY);
1559 
1560 	might_sleep();
1561 	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1562 
1563 	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1564 	if (unlikely(!va))
1565 		return ERR_PTR(-ENOMEM);
1566 
1567 	/*
1568 	 * Only scan the relevant parts containing pointers to other objects
1569 	 * to avoid false negatives.
1570 	 */
1571 	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1572 
1573 retry:
1574 	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1575 	addr = __alloc_vmap_area(size, align, vstart, vend);
1576 	spin_unlock(&free_vmap_area_lock);
1577 
1578 	/*
1579 	 * If an allocation fails, the "vend" address is
1580 	 * returned. Therefore trigger the overflow path.
1581 	 */
1582 	if (unlikely(addr == vend))
1583 		goto overflow;
1584 
1585 	va->va_start = addr;
1586 	va->va_end = addr + size;
1587 	va->vm = NULL;
1588 
1589 	spin_lock(&vmap_area_lock);
1590 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1591 	spin_unlock(&vmap_area_lock);
1592 
1593 	BUG_ON(!IS_ALIGNED(va->va_start, align));
1594 	BUG_ON(va->va_start < vstart);
1595 	BUG_ON(va->va_end > vend);
1596 
1597 	ret = kasan_populate_vmalloc(addr, size);
1598 	if (ret) {
1599 		free_vmap_area(va);
1600 		return ERR_PTR(ret);
1601 	}
1602 
1603 	return va;
1604 
1605 overflow:
1606 	if (!purged) {
1607 		purge_vmap_area_lazy();
1608 		purged = 1;
1609 		goto retry;
1610 	}
1611 
1612 	freed = 0;
1613 	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1614 
1615 	if (freed > 0) {
1616 		purged = 0;
1617 		goto retry;
1618 	}
1619 
1620 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1621 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1622 			size);
1623 
1624 	kmem_cache_free(vmap_area_cachep, va);
1625 	return ERR_PTR(-EBUSY);
1626 }
1627 
1628 int register_vmap_purge_notifier(struct notifier_block *nb)
1629 {
1630 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
1631 }
1632 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1633 
1634 int unregister_vmap_purge_notifier(struct notifier_block *nb)
1635 {
1636 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1637 }
1638 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
1639 
1640 /*
1641  * lazy_max_pages is the maximum amount of virtual address space we gather up
1642  * before attempting to purge with a TLB flush.
1643  *
1644  * There is a tradeoff here: a larger number will cover more kernel page tables
1645  * and take slightly longer to purge, but it will linearly reduce the number of
1646  * global TLB flushes that must be performed. It would seem natural to scale
1647  * this number up linearly with the number of CPUs (because vmapping activity
1648  * could also scale linearly with the number of CPUs), however it is likely
1649  * that in practice, workloads might be constrained in other ways that mean
1650  * vmap activity will not scale linearly with CPUs. Also, I want to be
1651  * conservative and not introduce a big latency on huge systems, so go with
1652  * a less aggressive log scale. It will still be an improvement over the old
1653  * code, and it will be simple to change the scale factor if we find that it
1654  * becomes a problem on bigger systems.
1655  */
1656 static unsigned long lazy_max_pages(void)
1657 {
1658 	unsigned int log;
1659 
1660 	log = fls(num_online_cpus());
1661 
1662 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1663 }
1664 
1665 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1666 
1667 /*
1668  * Serialize vmap purging.  There is no actual critical section protected
1669  * by this look, but we want to avoid concurrent calls for performance
1670  * reasons and to make the pcpu_get_vm_areas more deterministic.
1671  */
1672 static DEFINE_MUTEX(vmap_purge_lock);
1673 
1674 /* for per-CPU blocks */
1675 static void purge_fragmented_blocks_allcpus(void);
1676 
1677 /*
1678  * Purges all lazily-freed vmap areas.
1679  */
1680 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1681 {
1682 	unsigned long resched_threshold;
1683 	struct list_head local_pure_list;
1684 	struct vmap_area *va, *n_va;
1685 
1686 	lockdep_assert_held(&vmap_purge_lock);
1687 
1688 	spin_lock(&purge_vmap_area_lock);
1689 	purge_vmap_area_root = RB_ROOT;
1690 	list_replace_init(&purge_vmap_area_list, &local_pure_list);
1691 	spin_unlock(&purge_vmap_area_lock);
1692 
1693 	if (unlikely(list_empty(&local_pure_list)))
1694 		return false;
1695 
1696 	start = min(start,
1697 		list_first_entry(&local_pure_list,
1698 			struct vmap_area, list)->va_start);
1699 
1700 	end = max(end,
1701 		list_last_entry(&local_pure_list,
1702 			struct vmap_area, list)->va_end);
1703 
1704 	flush_tlb_kernel_range(start, end);
1705 	resched_threshold = lazy_max_pages() << 1;
1706 
1707 	spin_lock(&free_vmap_area_lock);
1708 	list_for_each_entry_safe(va, n_va, &local_pure_list, list) {
1709 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1710 		unsigned long orig_start = va->va_start;
1711 		unsigned long orig_end = va->va_end;
1712 
1713 		/*
1714 		 * Finally insert or merge lazily-freed area. It is
1715 		 * detached and there is no need to "unlink" it from
1716 		 * anything.
1717 		 */
1718 		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1719 				&free_vmap_area_list);
1720 
1721 		if (!va)
1722 			continue;
1723 
1724 		if (is_vmalloc_or_module_addr((void *)orig_start))
1725 			kasan_release_vmalloc(orig_start, orig_end,
1726 					      va->va_start, va->va_end);
1727 
1728 		atomic_long_sub(nr, &vmap_lazy_nr);
1729 
1730 		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1731 			cond_resched_lock(&free_vmap_area_lock);
1732 	}
1733 	spin_unlock(&free_vmap_area_lock);
1734 	return true;
1735 }
1736 
1737 /*
1738  * Kick off a purge of the outstanding lazy areas.
1739  */
1740 static void purge_vmap_area_lazy(void)
1741 {
1742 	mutex_lock(&vmap_purge_lock);
1743 	purge_fragmented_blocks_allcpus();
1744 	__purge_vmap_area_lazy(ULONG_MAX, 0);
1745 	mutex_unlock(&vmap_purge_lock);
1746 }
1747 
1748 static void drain_vmap_area_work(struct work_struct *work)
1749 {
1750 	unsigned long nr_lazy;
1751 
1752 	do {
1753 		mutex_lock(&vmap_purge_lock);
1754 		__purge_vmap_area_lazy(ULONG_MAX, 0);
1755 		mutex_unlock(&vmap_purge_lock);
1756 
1757 		/* Recheck if further work is required. */
1758 		nr_lazy = atomic_long_read(&vmap_lazy_nr);
1759 	} while (nr_lazy > lazy_max_pages());
1760 }
1761 
1762 /*
1763  * Free a vmap area, caller ensuring that the area has been unmapped
1764  * and flush_cache_vunmap had been called for the correct range
1765  * previously.
1766  */
1767 static void free_vmap_area_noflush(struct vmap_area *va)
1768 {
1769 	unsigned long nr_lazy;
1770 
1771 	spin_lock(&vmap_area_lock);
1772 	unlink_va(va, &vmap_area_root);
1773 	spin_unlock(&vmap_area_lock);
1774 
1775 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1776 				PAGE_SHIFT, &vmap_lazy_nr);
1777 
1778 	/*
1779 	 * Merge or place it to the purge tree/list.
1780 	 */
1781 	spin_lock(&purge_vmap_area_lock);
1782 	merge_or_add_vmap_area(va,
1783 		&purge_vmap_area_root, &purge_vmap_area_list);
1784 	spin_unlock(&purge_vmap_area_lock);
1785 
1786 	/* After this point, we may free va at any time */
1787 	if (unlikely(nr_lazy > lazy_max_pages()))
1788 		schedule_work(&drain_vmap_work);
1789 }
1790 
1791 /*
1792  * Free and unmap a vmap area
1793  */
1794 static void free_unmap_vmap_area(struct vmap_area *va)
1795 {
1796 	flush_cache_vunmap(va->va_start, va->va_end);
1797 	vunmap_range_noflush(va->va_start, va->va_end);
1798 	if (debug_pagealloc_enabled_static())
1799 		flush_tlb_kernel_range(va->va_start, va->va_end);
1800 
1801 	free_vmap_area_noflush(va);
1802 }
1803 
1804 static struct vmap_area *find_vmap_area(unsigned long addr)
1805 {
1806 	struct vmap_area *va;
1807 
1808 	spin_lock(&vmap_area_lock);
1809 	va = __find_vmap_area(addr);
1810 	spin_unlock(&vmap_area_lock);
1811 
1812 	return va;
1813 }
1814 
1815 /*** Per cpu kva allocator ***/
1816 
1817 /*
1818  * vmap space is limited especially on 32 bit architectures. Ensure there is
1819  * room for at least 16 percpu vmap blocks per CPU.
1820  */
1821 /*
1822  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1823  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
1824  * instead (we just need a rough idea)
1825  */
1826 #if BITS_PER_LONG == 32
1827 #define VMALLOC_SPACE		(128UL*1024*1024)
1828 #else
1829 #define VMALLOC_SPACE		(128UL*1024*1024*1024)
1830 #endif
1831 
1832 #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1833 #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1834 #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1835 #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1836 #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1837 #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1838 #define VMAP_BBMAP_BITS		\
1839 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1840 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1841 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1842 
1843 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
1844 
1845 struct vmap_block_queue {
1846 	spinlock_t lock;
1847 	struct list_head free;
1848 };
1849 
1850 struct vmap_block {
1851 	spinlock_t lock;
1852 	struct vmap_area *va;
1853 	unsigned long free, dirty;
1854 	unsigned long dirty_min, dirty_max; /*< dirty range */
1855 	struct list_head free_list;
1856 	struct rcu_head rcu_head;
1857 	struct list_head purge;
1858 };
1859 
1860 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1861 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1862 
1863 /*
1864  * XArray of vmap blocks, indexed by address, to quickly find a vmap block
1865  * in the free path. Could get rid of this if we change the API to return a
1866  * "cookie" from alloc, to be passed to free. But no big deal yet.
1867  */
1868 static DEFINE_XARRAY(vmap_blocks);
1869 
1870 /*
1871  * We should probably have a fallback mechanism to allocate virtual memory
1872  * out of partially filled vmap blocks. However vmap block sizing should be
1873  * fairly reasonable according to the vmalloc size, so it shouldn't be a
1874  * big problem.
1875  */
1876 
1877 static unsigned long addr_to_vb_idx(unsigned long addr)
1878 {
1879 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1880 	addr /= VMAP_BLOCK_SIZE;
1881 	return addr;
1882 }
1883 
1884 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1885 {
1886 	unsigned long addr;
1887 
1888 	addr = va_start + (pages_off << PAGE_SHIFT);
1889 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1890 	return (void *)addr;
1891 }
1892 
1893 /**
1894  * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
1895  *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
1896  * @order:    how many 2^order pages should be occupied in newly allocated block
1897  * @gfp_mask: flags for the page level allocator
1898  *
1899  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1900  */
1901 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1902 {
1903 	struct vmap_block_queue *vbq;
1904 	struct vmap_block *vb;
1905 	struct vmap_area *va;
1906 	unsigned long vb_idx;
1907 	int node, err;
1908 	void *vaddr;
1909 
1910 	node = numa_node_id();
1911 
1912 	vb = kmalloc_node(sizeof(struct vmap_block),
1913 			gfp_mask & GFP_RECLAIM_MASK, node);
1914 	if (unlikely(!vb))
1915 		return ERR_PTR(-ENOMEM);
1916 
1917 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1918 					VMALLOC_START, VMALLOC_END,
1919 					node, gfp_mask);
1920 	if (IS_ERR(va)) {
1921 		kfree(vb);
1922 		return ERR_CAST(va);
1923 	}
1924 
1925 	vaddr = vmap_block_vaddr(va->va_start, 0);
1926 	spin_lock_init(&vb->lock);
1927 	vb->va = va;
1928 	/* At least something should be left free */
1929 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1930 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1931 	vb->dirty = 0;
1932 	vb->dirty_min = VMAP_BBMAP_BITS;
1933 	vb->dirty_max = 0;
1934 	INIT_LIST_HEAD(&vb->free_list);
1935 
1936 	vb_idx = addr_to_vb_idx(va->va_start);
1937 	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
1938 	if (err) {
1939 		kfree(vb);
1940 		free_vmap_area(va);
1941 		return ERR_PTR(err);
1942 	}
1943 
1944 	vbq = &get_cpu_var(vmap_block_queue);
1945 	spin_lock(&vbq->lock);
1946 	list_add_tail_rcu(&vb->free_list, &vbq->free);
1947 	spin_unlock(&vbq->lock);
1948 	put_cpu_var(vmap_block_queue);
1949 
1950 	return vaddr;
1951 }
1952 
1953 static void free_vmap_block(struct vmap_block *vb)
1954 {
1955 	struct vmap_block *tmp;
1956 
1957 	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1958 	BUG_ON(tmp != vb);
1959 
1960 	free_vmap_area_noflush(vb->va);
1961 	kfree_rcu(vb, rcu_head);
1962 }
1963 
1964 static void purge_fragmented_blocks(int cpu)
1965 {
1966 	LIST_HEAD(purge);
1967 	struct vmap_block *vb;
1968 	struct vmap_block *n_vb;
1969 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1970 
1971 	rcu_read_lock();
1972 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1973 
1974 		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1975 			continue;
1976 
1977 		spin_lock(&vb->lock);
1978 		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1979 			vb->free = 0; /* prevent further allocs after releasing lock */
1980 			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1981 			vb->dirty_min = 0;
1982 			vb->dirty_max = VMAP_BBMAP_BITS;
1983 			spin_lock(&vbq->lock);
1984 			list_del_rcu(&vb->free_list);
1985 			spin_unlock(&vbq->lock);
1986 			spin_unlock(&vb->lock);
1987 			list_add_tail(&vb->purge, &purge);
1988 		} else
1989 			spin_unlock(&vb->lock);
1990 	}
1991 	rcu_read_unlock();
1992 
1993 	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1994 		list_del(&vb->purge);
1995 		free_vmap_block(vb);
1996 	}
1997 }
1998 
1999 static void purge_fragmented_blocks_allcpus(void)
2000 {
2001 	int cpu;
2002 
2003 	for_each_possible_cpu(cpu)
2004 		purge_fragmented_blocks(cpu);
2005 }
2006 
2007 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2008 {
2009 	struct vmap_block_queue *vbq;
2010 	struct vmap_block *vb;
2011 	void *vaddr = NULL;
2012 	unsigned int order;
2013 
2014 	BUG_ON(offset_in_page(size));
2015 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2016 	if (WARN_ON(size == 0)) {
2017 		/*
2018 		 * Allocating 0 bytes isn't what caller wants since
2019 		 * get_order(0) returns funny result. Just warn and terminate
2020 		 * early.
2021 		 */
2022 		return NULL;
2023 	}
2024 	order = get_order(size);
2025 
2026 	rcu_read_lock();
2027 	vbq = &get_cpu_var(vmap_block_queue);
2028 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2029 		unsigned long pages_off;
2030 
2031 		spin_lock(&vb->lock);
2032 		if (vb->free < (1UL << order)) {
2033 			spin_unlock(&vb->lock);
2034 			continue;
2035 		}
2036 
2037 		pages_off = VMAP_BBMAP_BITS - vb->free;
2038 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2039 		vb->free -= 1UL << order;
2040 		if (vb->free == 0) {
2041 			spin_lock(&vbq->lock);
2042 			list_del_rcu(&vb->free_list);
2043 			spin_unlock(&vbq->lock);
2044 		}
2045 
2046 		spin_unlock(&vb->lock);
2047 		break;
2048 	}
2049 
2050 	put_cpu_var(vmap_block_queue);
2051 	rcu_read_unlock();
2052 
2053 	/* Allocate new block if nothing was found */
2054 	if (!vaddr)
2055 		vaddr = new_vmap_block(order, gfp_mask);
2056 
2057 	return vaddr;
2058 }
2059 
2060 static void vb_free(unsigned long addr, unsigned long size)
2061 {
2062 	unsigned long offset;
2063 	unsigned int order;
2064 	struct vmap_block *vb;
2065 
2066 	BUG_ON(offset_in_page(size));
2067 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2068 
2069 	flush_cache_vunmap(addr, addr + size);
2070 
2071 	order = get_order(size);
2072 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2073 	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
2074 
2075 	vunmap_range_noflush(addr, addr + size);
2076 
2077 	if (debug_pagealloc_enabled_static())
2078 		flush_tlb_kernel_range(addr, addr + size);
2079 
2080 	spin_lock(&vb->lock);
2081 
2082 	/* Expand dirty range */
2083 	vb->dirty_min = min(vb->dirty_min, offset);
2084 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2085 
2086 	vb->dirty += 1UL << order;
2087 	if (vb->dirty == VMAP_BBMAP_BITS) {
2088 		BUG_ON(vb->free);
2089 		spin_unlock(&vb->lock);
2090 		free_vmap_block(vb);
2091 	} else
2092 		spin_unlock(&vb->lock);
2093 }
2094 
2095 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2096 {
2097 	int cpu;
2098 
2099 	if (unlikely(!vmap_initialized))
2100 		return;
2101 
2102 	might_sleep();
2103 
2104 	for_each_possible_cpu(cpu) {
2105 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2106 		struct vmap_block *vb;
2107 
2108 		rcu_read_lock();
2109 		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2110 			spin_lock(&vb->lock);
2111 			if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
2112 				unsigned long va_start = vb->va->va_start;
2113 				unsigned long s, e;
2114 
2115 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
2116 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2117 
2118 				start = min(s, start);
2119 				end   = max(e, end);
2120 
2121 				flush = 1;
2122 			}
2123 			spin_unlock(&vb->lock);
2124 		}
2125 		rcu_read_unlock();
2126 	}
2127 
2128 	mutex_lock(&vmap_purge_lock);
2129 	purge_fragmented_blocks_allcpus();
2130 	if (!__purge_vmap_area_lazy(start, end) && flush)
2131 		flush_tlb_kernel_range(start, end);
2132 	mutex_unlock(&vmap_purge_lock);
2133 }
2134 
2135 /**
2136  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2137  *
2138  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2139  * to amortize TLB flushing overheads. What this means is that any page you
2140  * have now, may, in a former life, have been mapped into kernel virtual
2141  * address by the vmap layer and so there might be some CPUs with TLB entries
2142  * still referencing that page (additional to the regular 1:1 kernel mapping).
2143  *
2144  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2145  * be sure that none of the pages we have control over will have any aliases
2146  * from the vmap layer.
2147  */
2148 void vm_unmap_aliases(void)
2149 {
2150 	unsigned long start = ULONG_MAX, end = 0;
2151 	int flush = 0;
2152 
2153 	_vm_unmap_aliases(start, end, flush);
2154 }
2155 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2156 
2157 /**
2158  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2159  * @mem: the pointer returned by vm_map_ram
2160  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2161  */
2162 void vm_unmap_ram(const void *mem, unsigned int count)
2163 {
2164 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2165 	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
2166 	struct vmap_area *va;
2167 
2168 	might_sleep();
2169 	BUG_ON(!addr);
2170 	BUG_ON(addr < VMALLOC_START);
2171 	BUG_ON(addr > VMALLOC_END);
2172 	BUG_ON(!PAGE_ALIGNED(addr));
2173 
2174 	kasan_poison_vmalloc(mem, size);
2175 
2176 	if (likely(count <= VMAP_MAX_ALLOC)) {
2177 		debug_check_no_locks_freed(mem, size);
2178 		vb_free(addr, size);
2179 		return;
2180 	}
2181 
2182 	va = find_vmap_area(addr);
2183 	BUG_ON(!va);
2184 	debug_check_no_locks_freed((void *)va->va_start,
2185 				    (va->va_end - va->va_start));
2186 	free_unmap_vmap_area(va);
2187 }
2188 EXPORT_SYMBOL(vm_unmap_ram);
2189 
2190 /**
2191  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2192  * @pages: an array of pointers to the pages to be mapped
2193  * @count: number of pages
2194  * @node: prefer to allocate data structures on this node
2195  *
2196  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
2197  * faster than vmap so it's good.  But if you mix long-life and short-life
2198  * objects with vm_map_ram(), it could consume lots of address space through
2199  * fragmentation (especially on a 32bit machine).  You could see failures in
2200  * the end.  Please use this function for short-lived objects.
2201  *
2202  * Returns: a pointer to the address that has been mapped, or %NULL on failure
2203  */
2204 void *vm_map_ram(struct page **pages, unsigned int count, int node)
2205 {
2206 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2207 	unsigned long addr;
2208 	void *mem;
2209 
2210 	if (likely(count <= VMAP_MAX_ALLOC)) {
2211 		mem = vb_alloc(size, GFP_KERNEL);
2212 		if (IS_ERR(mem))
2213 			return NULL;
2214 		addr = (unsigned long)mem;
2215 	} else {
2216 		struct vmap_area *va;
2217 		va = alloc_vmap_area(size, PAGE_SIZE,
2218 				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
2219 		if (IS_ERR(va))
2220 			return NULL;
2221 
2222 		addr = va->va_start;
2223 		mem = (void *)addr;
2224 	}
2225 
2226 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2227 				pages, PAGE_SHIFT) < 0) {
2228 		vm_unmap_ram(mem, count);
2229 		return NULL;
2230 	}
2231 
2232 	/*
2233 	 * Mark the pages as accessible, now that they are mapped.
2234 	 * With hardware tag-based KASAN, marking is skipped for
2235 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2236 	 */
2237 	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
2238 
2239 	return mem;
2240 }
2241 EXPORT_SYMBOL(vm_map_ram);
2242 
2243 static struct vm_struct *vmlist __initdata;
2244 
2245 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2246 {
2247 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2248 	return vm->page_order;
2249 #else
2250 	return 0;
2251 #endif
2252 }
2253 
2254 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2255 {
2256 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2257 	vm->page_order = order;
2258 #else
2259 	BUG_ON(order != 0);
2260 #endif
2261 }
2262 
2263 /**
2264  * vm_area_add_early - add vmap area early during boot
2265  * @vm: vm_struct to add
2266  *
2267  * This function is used to add fixed kernel vm area to vmlist before
2268  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
2269  * should contain proper values and the other fields should be zero.
2270  *
2271  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2272  */
2273 void __init vm_area_add_early(struct vm_struct *vm)
2274 {
2275 	struct vm_struct *tmp, **p;
2276 
2277 	BUG_ON(vmap_initialized);
2278 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2279 		if (tmp->addr >= vm->addr) {
2280 			BUG_ON(tmp->addr < vm->addr + vm->size);
2281 			break;
2282 		} else
2283 			BUG_ON(tmp->addr + tmp->size > vm->addr);
2284 	}
2285 	vm->next = *p;
2286 	*p = vm;
2287 }
2288 
2289 /**
2290  * vm_area_register_early - register vmap area early during boot
2291  * @vm: vm_struct to register
2292  * @align: requested alignment
2293  *
2294  * This function is used to register kernel vm area before
2295  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
2296  * proper values on entry and other fields should be zero.  On return,
2297  * vm->addr contains the allocated address.
2298  *
2299  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2300  */
2301 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2302 {
2303 	unsigned long addr = ALIGN(VMALLOC_START, align);
2304 	struct vm_struct *cur, **p;
2305 
2306 	BUG_ON(vmap_initialized);
2307 
2308 	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
2309 		if ((unsigned long)cur->addr - addr >= vm->size)
2310 			break;
2311 		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
2312 	}
2313 
2314 	BUG_ON(addr > VMALLOC_END - vm->size);
2315 	vm->addr = (void *)addr;
2316 	vm->next = *p;
2317 	*p = vm;
2318 	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
2319 }
2320 
2321 static void vmap_init_free_space(void)
2322 {
2323 	unsigned long vmap_start = 1;
2324 	const unsigned long vmap_end = ULONG_MAX;
2325 	struct vmap_area *busy, *free;
2326 
2327 	/*
2328 	 *     B     F     B     B     B     F
2329 	 * -|-----|.....|-----|-----|-----|.....|-
2330 	 *  |           The KVA space           |
2331 	 *  |<--------------------------------->|
2332 	 */
2333 	list_for_each_entry(busy, &vmap_area_list, list) {
2334 		if (busy->va_start - vmap_start > 0) {
2335 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2336 			if (!WARN_ON_ONCE(!free)) {
2337 				free->va_start = vmap_start;
2338 				free->va_end = busy->va_start;
2339 
2340 				insert_vmap_area_augment(free, NULL,
2341 					&free_vmap_area_root,
2342 						&free_vmap_area_list);
2343 			}
2344 		}
2345 
2346 		vmap_start = busy->va_end;
2347 	}
2348 
2349 	if (vmap_end - vmap_start > 0) {
2350 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2351 		if (!WARN_ON_ONCE(!free)) {
2352 			free->va_start = vmap_start;
2353 			free->va_end = vmap_end;
2354 
2355 			insert_vmap_area_augment(free, NULL,
2356 				&free_vmap_area_root,
2357 					&free_vmap_area_list);
2358 		}
2359 	}
2360 }
2361 
2362 void __init vmalloc_init(void)
2363 {
2364 	struct vmap_area *va;
2365 	struct vm_struct *tmp;
2366 	int i;
2367 
2368 	/*
2369 	 * Create the cache for vmap_area objects.
2370 	 */
2371 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
2372 
2373 	for_each_possible_cpu(i) {
2374 		struct vmap_block_queue *vbq;
2375 		struct vfree_deferred *p;
2376 
2377 		vbq = &per_cpu(vmap_block_queue, i);
2378 		spin_lock_init(&vbq->lock);
2379 		INIT_LIST_HEAD(&vbq->free);
2380 		p = &per_cpu(vfree_deferred, i);
2381 		init_llist_head(&p->list);
2382 		INIT_WORK(&p->wq, free_work);
2383 	}
2384 
2385 	/* Import existing vmlist entries. */
2386 	for (tmp = vmlist; tmp; tmp = tmp->next) {
2387 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2388 		if (WARN_ON_ONCE(!va))
2389 			continue;
2390 
2391 		va->va_start = (unsigned long)tmp->addr;
2392 		va->va_end = va->va_start + tmp->size;
2393 		va->vm = tmp;
2394 		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2395 	}
2396 
2397 	/*
2398 	 * Now we can initialize a free vmap space.
2399 	 */
2400 	vmap_init_free_space();
2401 	vmap_initialized = true;
2402 }
2403 
2404 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2405 	struct vmap_area *va, unsigned long flags, const void *caller)
2406 {
2407 	vm->flags = flags;
2408 	vm->addr = (void *)va->va_start;
2409 	vm->size = va->va_end - va->va_start;
2410 	vm->caller = caller;
2411 	va->vm = vm;
2412 }
2413 
2414 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2415 			      unsigned long flags, const void *caller)
2416 {
2417 	spin_lock(&vmap_area_lock);
2418 	setup_vmalloc_vm_locked(vm, va, flags, caller);
2419 	spin_unlock(&vmap_area_lock);
2420 }
2421 
2422 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2423 {
2424 	/*
2425 	 * Before removing VM_UNINITIALIZED,
2426 	 * we should make sure that vm has proper values.
2427 	 * Pair with smp_rmb() in show_numa_info().
2428 	 */
2429 	smp_wmb();
2430 	vm->flags &= ~VM_UNINITIALIZED;
2431 }
2432 
2433 static struct vm_struct *__get_vm_area_node(unsigned long size,
2434 		unsigned long align, unsigned long shift, unsigned long flags,
2435 		unsigned long start, unsigned long end, int node,
2436 		gfp_t gfp_mask, const void *caller)
2437 {
2438 	struct vmap_area *va;
2439 	struct vm_struct *area;
2440 	unsigned long requested_size = size;
2441 
2442 	BUG_ON(in_interrupt());
2443 	size = ALIGN(size, 1ul << shift);
2444 	if (unlikely(!size))
2445 		return NULL;
2446 
2447 	if (flags & VM_IOREMAP)
2448 		align = 1ul << clamp_t(int, get_count_order_long(size),
2449 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2450 
2451 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2452 	if (unlikely(!area))
2453 		return NULL;
2454 
2455 	if (!(flags & VM_NO_GUARD))
2456 		size += PAGE_SIZE;
2457 
2458 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2459 	if (IS_ERR(va)) {
2460 		kfree(area);
2461 		return NULL;
2462 	}
2463 
2464 	setup_vmalloc_vm(area, va, flags, caller);
2465 
2466 	/*
2467 	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
2468 	 * best-effort approach, as they can be mapped outside of vmalloc code.
2469 	 * For VM_ALLOC mappings, the pages are marked as accessible after
2470 	 * getting mapped in __vmalloc_node_range().
2471 	 * With hardware tag-based KASAN, marking is skipped for
2472 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2473 	 */
2474 	if (!(flags & VM_ALLOC))
2475 		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
2476 						    KASAN_VMALLOC_PROT_NORMAL);
2477 
2478 	return area;
2479 }
2480 
2481 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2482 				       unsigned long start, unsigned long end,
2483 				       const void *caller)
2484 {
2485 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2486 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2487 }
2488 
2489 /**
2490  * get_vm_area - reserve a contiguous kernel virtual area
2491  * @size:	 size of the area
2492  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
2493  *
2494  * Search an area of @size in the kernel virtual mapping area,
2495  * and reserved it for out purposes.  Returns the area descriptor
2496  * on success or %NULL on failure.
2497  *
2498  * Return: the area descriptor on success or %NULL on failure.
2499  */
2500 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2501 {
2502 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2503 				  VMALLOC_START, VMALLOC_END,
2504 				  NUMA_NO_NODE, GFP_KERNEL,
2505 				  __builtin_return_address(0));
2506 }
2507 
2508 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2509 				const void *caller)
2510 {
2511 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2512 				  VMALLOC_START, VMALLOC_END,
2513 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2514 }
2515 
2516 /**
2517  * find_vm_area - find a continuous kernel virtual area
2518  * @addr:	  base address
2519  *
2520  * Search for the kernel VM area starting at @addr, and return it.
2521  * It is up to the caller to do all required locking to keep the returned
2522  * pointer valid.
2523  *
2524  * Return: the area descriptor on success or %NULL on failure.
2525  */
2526 struct vm_struct *find_vm_area(const void *addr)
2527 {
2528 	struct vmap_area *va;
2529 
2530 	va = find_vmap_area((unsigned long)addr);
2531 	if (!va)
2532 		return NULL;
2533 
2534 	return va->vm;
2535 }
2536 
2537 /**
2538  * remove_vm_area - find and remove a continuous kernel virtual area
2539  * @addr:	    base address
2540  *
2541  * Search for the kernel VM area starting at @addr, and remove it.
2542  * This function returns the found VM area, but using it is NOT safe
2543  * on SMP machines, except for its size or flags.
2544  *
2545  * Return: the area descriptor on success or %NULL on failure.
2546  */
2547 struct vm_struct *remove_vm_area(const void *addr)
2548 {
2549 	struct vmap_area *va;
2550 
2551 	might_sleep();
2552 
2553 	spin_lock(&vmap_area_lock);
2554 	va = __find_vmap_area((unsigned long)addr);
2555 	if (va && va->vm) {
2556 		struct vm_struct *vm = va->vm;
2557 
2558 		va->vm = NULL;
2559 		spin_unlock(&vmap_area_lock);
2560 
2561 		kasan_free_module_shadow(vm);
2562 		free_unmap_vmap_area(va);
2563 
2564 		return vm;
2565 	}
2566 
2567 	spin_unlock(&vmap_area_lock);
2568 	return NULL;
2569 }
2570 
2571 static inline void set_area_direct_map(const struct vm_struct *area,
2572 				       int (*set_direct_map)(struct page *page))
2573 {
2574 	int i;
2575 
2576 	/* HUGE_VMALLOC passes small pages to set_direct_map */
2577 	for (i = 0; i < area->nr_pages; i++)
2578 		if (page_address(area->pages[i]))
2579 			set_direct_map(area->pages[i]);
2580 }
2581 
2582 /* Handle removing and resetting vm mappings related to the vm_struct. */
2583 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2584 {
2585 	unsigned long start = ULONG_MAX, end = 0;
2586 	unsigned int page_order = vm_area_page_order(area);
2587 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2588 	int flush_dmap = 0;
2589 	int i;
2590 
2591 	remove_vm_area(area->addr);
2592 
2593 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2594 	if (!flush_reset)
2595 		return;
2596 
2597 	/*
2598 	 * If not deallocating pages, just do the flush of the VM area and
2599 	 * return.
2600 	 */
2601 	if (!deallocate_pages) {
2602 		vm_unmap_aliases();
2603 		return;
2604 	}
2605 
2606 	/*
2607 	 * If execution gets here, flush the vm mapping and reset the direct
2608 	 * map. Find the start and end range of the direct mappings to make sure
2609 	 * the vm_unmap_aliases() flush includes the direct map.
2610 	 */
2611 	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2612 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
2613 		if (addr) {
2614 			unsigned long page_size;
2615 
2616 			page_size = PAGE_SIZE << page_order;
2617 			start = min(addr, start);
2618 			end = max(addr + page_size, end);
2619 			flush_dmap = 1;
2620 		}
2621 	}
2622 
2623 	/*
2624 	 * Set direct map to something invalid so that it won't be cached if
2625 	 * there are any accesses after the TLB flush, then flush the TLB and
2626 	 * reset the direct map permissions to the default.
2627 	 */
2628 	set_area_direct_map(area, set_direct_map_invalid_noflush);
2629 	_vm_unmap_aliases(start, end, flush_dmap);
2630 	set_area_direct_map(area, set_direct_map_default_noflush);
2631 }
2632 
2633 static void __vunmap(const void *addr, int deallocate_pages)
2634 {
2635 	struct vm_struct *area;
2636 
2637 	if (!addr)
2638 		return;
2639 
2640 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2641 			addr))
2642 		return;
2643 
2644 	area = find_vm_area(addr);
2645 	if (unlikely(!area)) {
2646 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2647 				addr);
2648 		return;
2649 	}
2650 
2651 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2652 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2653 
2654 	kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
2655 
2656 	vm_remove_mappings(area, deallocate_pages);
2657 
2658 	if (deallocate_pages) {
2659 		int i;
2660 
2661 		for (i = 0; i < area->nr_pages; i++) {
2662 			struct page *page = area->pages[i];
2663 
2664 			BUG_ON(!page);
2665 			mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
2666 			/*
2667 			 * High-order allocs for huge vmallocs are split, so
2668 			 * can be freed as an array of order-0 allocations
2669 			 */
2670 			__free_pages(page, 0);
2671 			cond_resched();
2672 		}
2673 		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2674 
2675 		kvfree(area->pages);
2676 	}
2677 
2678 	kfree(area);
2679 }
2680 
2681 static inline void __vfree_deferred(const void *addr)
2682 {
2683 	/*
2684 	 * Use raw_cpu_ptr() because this can be called from preemptible
2685 	 * context. Preemption is absolutely fine here, because the llist_add()
2686 	 * implementation is lockless, so it works even if we are adding to
2687 	 * another cpu's list. schedule_work() should be fine with this too.
2688 	 */
2689 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2690 
2691 	if (llist_add((struct llist_node *)addr, &p->list))
2692 		schedule_work(&p->wq);
2693 }
2694 
2695 /**
2696  * vfree_atomic - release memory allocated by vmalloc()
2697  * @addr:	  memory base address
2698  *
2699  * This one is just like vfree() but can be called in any atomic context
2700  * except NMIs.
2701  */
2702 void vfree_atomic(const void *addr)
2703 {
2704 	BUG_ON(in_nmi());
2705 
2706 	kmemleak_free(addr);
2707 
2708 	if (!addr)
2709 		return;
2710 	__vfree_deferred(addr);
2711 }
2712 
2713 static void __vfree(const void *addr)
2714 {
2715 	if (unlikely(in_interrupt()))
2716 		__vfree_deferred(addr);
2717 	else
2718 		__vunmap(addr, 1);
2719 }
2720 
2721 /**
2722  * vfree - Release memory allocated by vmalloc()
2723  * @addr:  Memory base address
2724  *
2725  * Free the virtually continuous memory area starting at @addr, as obtained
2726  * from one of the vmalloc() family of APIs.  This will usually also free the
2727  * physical memory underlying the virtual allocation, but that memory is
2728  * reference counted, so it will not be freed until the last user goes away.
2729  *
2730  * If @addr is NULL, no operation is performed.
2731  *
2732  * Context:
2733  * May sleep if called *not* from interrupt context.
2734  * Must not be called in NMI context (strictly speaking, it could be
2735  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2736  * conventions for vfree() arch-dependent would be a really bad idea).
2737  */
2738 void vfree(const void *addr)
2739 {
2740 	BUG_ON(in_nmi());
2741 
2742 	kmemleak_free(addr);
2743 
2744 	might_sleep_if(!in_interrupt());
2745 
2746 	if (!addr)
2747 		return;
2748 
2749 	__vfree(addr);
2750 }
2751 EXPORT_SYMBOL(vfree);
2752 
2753 /**
2754  * vunmap - release virtual mapping obtained by vmap()
2755  * @addr:   memory base address
2756  *
2757  * Free the virtually contiguous memory area starting at @addr,
2758  * which was created from the page array passed to vmap().
2759  *
2760  * Must not be called in interrupt context.
2761  */
2762 void vunmap(const void *addr)
2763 {
2764 	BUG_ON(in_interrupt());
2765 	might_sleep();
2766 	if (addr)
2767 		__vunmap(addr, 0);
2768 }
2769 EXPORT_SYMBOL(vunmap);
2770 
2771 /**
2772  * vmap - map an array of pages into virtually contiguous space
2773  * @pages: array of page pointers
2774  * @count: number of pages to map
2775  * @flags: vm_area->flags
2776  * @prot: page protection for the mapping
2777  *
2778  * Maps @count pages from @pages into contiguous kernel virtual space.
2779  * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2780  * (which must be kmalloc or vmalloc memory) and one reference per pages in it
2781  * are transferred from the caller to vmap(), and will be freed / dropped when
2782  * vfree() is called on the return value.
2783  *
2784  * Return: the address of the area or %NULL on failure
2785  */
2786 void *vmap(struct page **pages, unsigned int count,
2787 	   unsigned long flags, pgprot_t prot)
2788 {
2789 	struct vm_struct *area;
2790 	unsigned long addr;
2791 	unsigned long size;		/* In bytes */
2792 
2793 	might_sleep();
2794 
2795 	/*
2796 	 * Your top guard is someone else's bottom guard. Not having a top
2797 	 * guard compromises someone else's mappings too.
2798 	 */
2799 	if (WARN_ON_ONCE(flags & VM_NO_GUARD))
2800 		flags &= ~VM_NO_GUARD;
2801 
2802 	if (count > totalram_pages())
2803 		return NULL;
2804 
2805 	size = (unsigned long)count << PAGE_SHIFT;
2806 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2807 	if (!area)
2808 		return NULL;
2809 
2810 	addr = (unsigned long)area->addr;
2811 	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2812 				pages, PAGE_SHIFT) < 0) {
2813 		vunmap(area->addr);
2814 		return NULL;
2815 	}
2816 
2817 	if (flags & VM_MAP_PUT_PAGES) {
2818 		area->pages = pages;
2819 		area->nr_pages = count;
2820 	}
2821 	return area->addr;
2822 }
2823 EXPORT_SYMBOL(vmap);
2824 
2825 #ifdef CONFIG_VMAP_PFN
2826 struct vmap_pfn_data {
2827 	unsigned long	*pfns;
2828 	pgprot_t	prot;
2829 	unsigned int	idx;
2830 };
2831 
2832 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2833 {
2834 	struct vmap_pfn_data *data = private;
2835 
2836 	if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
2837 		return -EINVAL;
2838 	*pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
2839 	return 0;
2840 }
2841 
2842 /**
2843  * vmap_pfn - map an array of PFNs into virtually contiguous space
2844  * @pfns: array of PFNs
2845  * @count: number of pages to map
2846  * @prot: page protection for the mapping
2847  *
2848  * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
2849  * the start address of the mapping.
2850  */
2851 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2852 {
2853 	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2854 	struct vm_struct *area;
2855 
2856 	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2857 			__builtin_return_address(0));
2858 	if (!area)
2859 		return NULL;
2860 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2861 			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2862 		free_vm_area(area);
2863 		return NULL;
2864 	}
2865 	return area->addr;
2866 }
2867 EXPORT_SYMBOL_GPL(vmap_pfn);
2868 #endif /* CONFIG_VMAP_PFN */
2869 
2870 static inline unsigned int
2871 vm_area_alloc_pages(gfp_t gfp, int nid,
2872 		unsigned int order, unsigned int nr_pages, struct page **pages)
2873 {
2874 	unsigned int nr_allocated = 0;
2875 	struct page *page;
2876 	int i;
2877 
2878 	/*
2879 	 * For order-0 pages we make use of bulk allocator, if
2880 	 * the page array is partly or not at all populated due
2881 	 * to fails, fallback to a single page allocator that is
2882 	 * more permissive.
2883 	 */
2884 	if (!order) {
2885 		gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
2886 
2887 		while (nr_allocated < nr_pages) {
2888 			unsigned int nr, nr_pages_request;
2889 
2890 			/*
2891 			 * A maximum allowed request is hard-coded and is 100
2892 			 * pages per call. That is done in order to prevent a
2893 			 * long preemption off scenario in the bulk-allocator
2894 			 * so the range is [1:100].
2895 			 */
2896 			nr_pages_request = min(100U, nr_pages - nr_allocated);
2897 
2898 			/* memory allocation should consider mempolicy, we can't
2899 			 * wrongly use nearest node when nid == NUMA_NO_NODE,
2900 			 * otherwise memory may be allocated in only one node,
2901 			 * but mempolicy wants to alloc memory by interleaving.
2902 			 */
2903 			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
2904 				nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
2905 							nr_pages_request,
2906 							pages + nr_allocated);
2907 
2908 			else
2909 				nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
2910 							nr_pages_request,
2911 							pages + nr_allocated);
2912 
2913 			nr_allocated += nr;
2914 			cond_resched();
2915 
2916 			/*
2917 			 * If zero or pages were obtained partly,
2918 			 * fallback to a single page allocator.
2919 			 */
2920 			if (nr != nr_pages_request)
2921 				break;
2922 		}
2923 	}
2924 
2925 	/* High-order pages or fallback path if "bulk" fails. */
2926 
2927 	while (nr_allocated < nr_pages) {
2928 		if (fatal_signal_pending(current))
2929 			break;
2930 
2931 		if (nid == NUMA_NO_NODE)
2932 			page = alloc_pages(gfp, order);
2933 		else
2934 			page = alloc_pages_node(nid, gfp, order);
2935 		if (unlikely(!page))
2936 			break;
2937 		/*
2938 		 * Higher order allocations must be able to be treated as
2939 		 * indepdenent small pages by callers (as they can with
2940 		 * small-page vmallocs). Some drivers do their own refcounting
2941 		 * on vmalloc_to_page() pages, some use page->mapping,
2942 		 * page->lru, etc.
2943 		 */
2944 		if (order)
2945 			split_page(page, order);
2946 
2947 		/*
2948 		 * Careful, we allocate and map page-order pages, but
2949 		 * tracking is done per PAGE_SIZE page so as to keep the
2950 		 * vm_struct APIs independent of the physical/mapped size.
2951 		 */
2952 		for (i = 0; i < (1U << order); i++)
2953 			pages[nr_allocated + i] = page + i;
2954 
2955 		cond_resched();
2956 		nr_allocated += 1U << order;
2957 	}
2958 
2959 	return nr_allocated;
2960 }
2961 
2962 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2963 				 pgprot_t prot, unsigned int page_shift,
2964 				 int node)
2965 {
2966 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2967 	bool nofail = gfp_mask & __GFP_NOFAIL;
2968 	unsigned long addr = (unsigned long)area->addr;
2969 	unsigned long size = get_vm_area_size(area);
2970 	unsigned long array_size;
2971 	unsigned int nr_small_pages = size >> PAGE_SHIFT;
2972 	unsigned int page_order;
2973 	unsigned int flags;
2974 	int ret;
2975 
2976 	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
2977 	gfp_mask |= __GFP_NOWARN;
2978 	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
2979 		gfp_mask |= __GFP_HIGHMEM;
2980 
2981 	/* Please note that the recursion is strictly bounded. */
2982 	if (array_size > PAGE_SIZE) {
2983 		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
2984 					area->caller);
2985 	} else {
2986 		area->pages = kmalloc_node(array_size, nested_gfp, node);
2987 	}
2988 
2989 	if (!area->pages) {
2990 		warn_alloc(gfp_mask, NULL,
2991 			"vmalloc error: size %lu, failed to allocated page array size %lu",
2992 			nr_small_pages * PAGE_SIZE, array_size);
2993 		free_vm_area(area);
2994 		return NULL;
2995 	}
2996 
2997 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
2998 	page_order = vm_area_page_order(area);
2999 
3000 	area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
3001 		node, page_order, nr_small_pages, area->pages);
3002 
3003 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
3004 	if (gfp_mask & __GFP_ACCOUNT) {
3005 		int i;
3006 
3007 		for (i = 0; i < area->nr_pages; i++)
3008 			mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
3009 	}
3010 
3011 	/*
3012 	 * If not enough pages were obtained to accomplish an
3013 	 * allocation request, free them via __vfree() if any.
3014 	 */
3015 	if (area->nr_pages != nr_small_pages) {
3016 		warn_alloc(gfp_mask, NULL,
3017 			"vmalloc error: size %lu, page order %u, failed to allocate pages",
3018 			area->nr_pages * PAGE_SIZE, page_order);
3019 		goto fail;
3020 	}
3021 
3022 	/*
3023 	 * page tables allocations ignore external gfp mask, enforce it
3024 	 * by the scope API
3025 	 */
3026 	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3027 		flags = memalloc_nofs_save();
3028 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3029 		flags = memalloc_noio_save();
3030 
3031 	do {
3032 		ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3033 			page_shift);
3034 		if (nofail && (ret < 0))
3035 			schedule_timeout_uninterruptible(1);
3036 	} while (nofail && (ret < 0));
3037 
3038 	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3039 		memalloc_nofs_restore(flags);
3040 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3041 		memalloc_noio_restore(flags);
3042 
3043 	if (ret < 0) {
3044 		warn_alloc(gfp_mask, NULL,
3045 			"vmalloc error: size %lu, failed to map pages",
3046 			area->nr_pages * PAGE_SIZE);
3047 		goto fail;
3048 	}
3049 
3050 	return area->addr;
3051 
3052 fail:
3053 	__vfree(area->addr);
3054 	return NULL;
3055 }
3056 
3057 /**
3058  * __vmalloc_node_range - allocate virtually contiguous memory
3059  * @size:		  allocation size
3060  * @align:		  desired alignment
3061  * @start:		  vm area range start
3062  * @end:		  vm area range end
3063  * @gfp_mask:		  flags for the page level allocator
3064  * @prot:		  protection mask for the allocated pages
3065  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
3066  * @node:		  node to use for allocation or NUMA_NO_NODE
3067  * @caller:		  caller's return address
3068  *
3069  * Allocate enough pages to cover @size from the page level
3070  * allocator with @gfp_mask flags. Please note that the full set of gfp
3071  * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
3072  * supported.
3073  * Zone modifiers are not supported. From the reclaim modifiers
3074  * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
3075  * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
3076  * __GFP_RETRY_MAYFAIL are not supported).
3077  *
3078  * __GFP_NOWARN can be used to suppress failures messages.
3079  *
3080  * Map them into contiguous kernel virtual space, using a pagetable
3081  * protection of @prot.
3082  *
3083  * Return: the address of the area or %NULL on failure
3084  */
3085 void *__vmalloc_node_range(unsigned long size, unsigned long align,
3086 			unsigned long start, unsigned long end, gfp_t gfp_mask,
3087 			pgprot_t prot, unsigned long vm_flags, int node,
3088 			const void *caller)
3089 {
3090 	struct vm_struct *area;
3091 	void *ret;
3092 	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3093 	unsigned long real_size = size;
3094 	unsigned long real_align = align;
3095 	unsigned int shift = PAGE_SHIFT;
3096 
3097 	if (WARN_ON_ONCE(!size))
3098 		return NULL;
3099 
3100 	if ((size >> PAGE_SHIFT) > totalram_pages()) {
3101 		warn_alloc(gfp_mask, NULL,
3102 			"vmalloc error: size %lu, exceeds total pages",
3103 			real_size);
3104 		return NULL;
3105 	}
3106 
3107 	if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3108 		unsigned long size_per_node;
3109 
3110 		/*
3111 		 * Try huge pages. Only try for PAGE_KERNEL allocations,
3112 		 * others like modules don't yet expect huge pages in
3113 		 * their allocations due to apply_to_page_range not
3114 		 * supporting them.
3115 		 */
3116 
3117 		size_per_node = size;
3118 		if (node == NUMA_NO_NODE)
3119 			size_per_node /= num_online_nodes();
3120 		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3121 			shift = PMD_SHIFT;
3122 		else
3123 			shift = arch_vmap_pte_supported_shift(size_per_node);
3124 
3125 		align = max(real_align, 1UL << shift);
3126 		size = ALIGN(real_size, 1UL << shift);
3127 	}
3128 
3129 again:
3130 	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3131 				  VM_UNINITIALIZED | vm_flags, start, end, node,
3132 				  gfp_mask, caller);
3133 	if (!area) {
3134 		bool nofail = gfp_mask & __GFP_NOFAIL;
3135 		warn_alloc(gfp_mask, NULL,
3136 			"vmalloc error: size %lu, vm_struct allocation failed%s",
3137 			real_size, (nofail) ? ". Retrying." : "");
3138 		if (nofail) {
3139 			schedule_timeout_uninterruptible(1);
3140 			goto again;
3141 		}
3142 		goto fail;
3143 	}
3144 
3145 	/*
3146 	 * Prepare arguments for __vmalloc_area_node() and
3147 	 * kasan_unpoison_vmalloc().
3148 	 */
3149 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3150 		if (kasan_hw_tags_enabled()) {
3151 			/*
3152 			 * Modify protection bits to allow tagging.
3153 			 * This must be done before mapping.
3154 			 */
3155 			prot = arch_vmap_pgprot_tagged(prot);
3156 
3157 			/*
3158 			 * Skip page_alloc poisoning and zeroing for physical
3159 			 * pages backing VM_ALLOC mapping. Memory is instead
3160 			 * poisoned and zeroed by kasan_unpoison_vmalloc().
3161 			 */
3162 			gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
3163 		}
3164 
3165 		/* Take note that the mapping is PAGE_KERNEL. */
3166 		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3167 	}
3168 
3169 	/* Allocate physical pages and map them into vmalloc space. */
3170 	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3171 	if (!ret)
3172 		goto fail;
3173 
3174 	/*
3175 	 * Mark the pages as accessible, now that they are mapped.
3176 	 * The init condition should match the one in post_alloc_hook()
3177 	 * (except for the should_skip_init() check) to make sure that memory
3178 	 * is initialized under the same conditions regardless of the enabled
3179 	 * KASAN mode.
3180 	 * Tag-based KASAN modes only assign tags to normal non-executable
3181 	 * allocations, see __kasan_unpoison_vmalloc().
3182 	 */
3183 	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
3184 	if (!want_init_on_free() && want_init_on_alloc(gfp_mask))
3185 		kasan_flags |= KASAN_VMALLOC_INIT;
3186 	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3187 	area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
3188 
3189 	/*
3190 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
3191 	 * flag. It means that vm_struct is not fully initialized.
3192 	 * Now, it is fully initialized, so remove this flag here.
3193 	 */
3194 	clear_vm_uninitialized_flag(area);
3195 
3196 	size = PAGE_ALIGN(size);
3197 	if (!(vm_flags & VM_DEFER_KMEMLEAK))
3198 		kmemleak_vmalloc(area, size, gfp_mask);
3199 
3200 	return area->addr;
3201 
3202 fail:
3203 	if (shift > PAGE_SHIFT) {
3204 		shift = PAGE_SHIFT;
3205 		align = real_align;
3206 		size = real_size;
3207 		goto again;
3208 	}
3209 
3210 	return NULL;
3211 }
3212 
3213 /**
3214  * __vmalloc_node - allocate virtually contiguous memory
3215  * @size:	    allocation size
3216  * @align:	    desired alignment
3217  * @gfp_mask:	    flags for the page level allocator
3218  * @node:	    node to use for allocation or NUMA_NO_NODE
3219  * @caller:	    caller's return address
3220  *
3221  * Allocate enough pages to cover @size from the page level allocator with
3222  * @gfp_mask flags.  Map them into contiguous kernel virtual space.
3223  *
3224  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3225  * and __GFP_NOFAIL are not supported
3226  *
3227  * Any use of gfp flags outside of GFP_KERNEL should be consulted
3228  * with mm people.
3229  *
3230  * Return: pointer to the allocated memory or %NULL on error
3231  */
3232 void *__vmalloc_node(unsigned long size, unsigned long align,
3233 			    gfp_t gfp_mask, int node, const void *caller)
3234 {
3235 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3236 				gfp_mask, PAGE_KERNEL, 0, node, caller);
3237 }
3238 /*
3239  * This is only for performance analysis of vmalloc and stress purpose.
3240  * It is required by vmalloc test module, therefore do not use it other
3241  * than that.
3242  */
3243 #ifdef CONFIG_TEST_VMALLOC_MODULE
3244 EXPORT_SYMBOL_GPL(__vmalloc_node);
3245 #endif
3246 
3247 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3248 {
3249 	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3250 				__builtin_return_address(0));
3251 }
3252 EXPORT_SYMBOL(__vmalloc);
3253 
3254 /**
3255  * vmalloc - allocate virtually contiguous memory
3256  * @size:    allocation size
3257  *
3258  * Allocate enough pages to cover @size from the page level
3259  * allocator and map them into contiguous kernel virtual space.
3260  *
3261  * For tight control over page level allocator and protection flags
3262  * use __vmalloc() instead.
3263  *
3264  * Return: pointer to the allocated memory or %NULL on error
3265  */
3266 void *vmalloc(unsigned long size)
3267 {
3268 	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3269 				__builtin_return_address(0));
3270 }
3271 EXPORT_SYMBOL(vmalloc);
3272 
3273 /**
3274  * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
3275  * @size:      allocation size
3276  * @gfp_mask:  flags for the page level allocator
3277  *
3278  * Allocate enough pages to cover @size from the page level
3279  * allocator and map them into contiguous kernel virtual space.
3280  * If @size is greater than or equal to PMD_SIZE, allow using
3281  * huge pages for the memory
3282  *
3283  * Return: pointer to the allocated memory or %NULL on error
3284  */
3285 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
3286 {
3287 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3288 				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
3289 				    NUMA_NO_NODE, __builtin_return_address(0));
3290 }
3291 EXPORT_SYMBOL_GPL(vmalloc_huge);
3292 
3293 /**
3294  * vzalloc - allocate virtually contiguous memory with zero fill
3295  * @size:    allocation size
3296  *
3297  * Allocate enough pages to cover @size from the page level
3298  * allocator and map them into contiguous kernel virtual space.
3299  * The memory allocated is set to zero.
3300  *
3301  * For tight control over page level allocator and protection flags
3302  * use __vmalloc() instead.
3303  *
3304  * Return: pointer to the allocated memory or %NULL on error
3305  */
3306 void *vzalloc(unsigned long size)
3307 {
3308 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3309 				__builtin_return_address(0));
3310 }
3311 EXPORT_SYMBOL(vzalloc);
3312 
3313 /**
3314  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3315  * @size: allocation size
3316  *
3317  * The resulting memory area is zeroed so it can be mapped to userspace
3318  * without leaking data.
3319  *
3320  * Return: pointer to the allocated memory or %NULL on error
3321  */
3322 void *vmalloc_user(unsigned long size)
3323 {
3324 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3325 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3326 				    VM_USERMAP, NUMA_NO_NODE,
3327 				    __builtin_return_address(0));
3328 }
3329 EXPORT_SYMBOL(vmalloc_user);
3330 
3331 /**
3332  * vmalloc_node - allocate memory on a specific node
3333  * @size:	  allocation size
3334  * @node:	  numa node
3335  *
3336  * Allocate enough pages to cover @size from the page level
3337  * allocator and map them into contiguous kernel virtual space.
3338  *
3339  * For tight control over page level allocator and protection flags
3340  * use __vmalloc() instead.
3341  *
3342  * Return: pointer to the allocated memory or %NULL on error
3343  */
3344 void *vmalloc_node(unsigned long size, int node)
3345 {
3346 	return __vmalloc_node(size, 1, GFP_KERNEL, node,
3347 			__builtin_return_address(0));
3348 }
3349 EXPORT_SYMBOL(vmalloc_node);
3350 
3351 /**
3352  * vzalloc_node - allocate memory on a specific node with zero fill
3353  * @size:	allocation size
3354  * @node:	numa node
3355  *
3356  * Allocate enough pages to cover @size from the page level
3357  * allocator and map them into contiguous kernel virtual space.
3358  * The memory allocated is set to zero.
3359  *
3360  * Return: pointer to the allocated memory or %NULL on error
3361  */
3362 void *vzalloc_node(unsigned long size, int node)
3363 {
3364 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3365 				__builtin_return_address(0));
3366 }
3367 EXPORT_SYMBOL(vzalloc_node);
3368 
3369 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3370 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3371 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3372 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3373 #else
3374 /*
3375  * 64b systems should always have either DMA or DMA32 zones. For others
3376  * GFP_DMA32 should do the right thing and use the normal zone.
3377  */
3378 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3379 #endif
3380 
3381 /**
3382  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3383  * @size:	allocation size
3384  *
3385  * Allocate enough 32bit PA addressable pages to cover @size from the
3386  * page level allocator and map them into contiguous kernel virtual space.
3387  *
3388  * Return: pointer to the allocated memory or %NULL on error
3389  */
3390 void *vmalloc_32(unsigned long size)
3391 {
3392 	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3393 			__builtin_return_address(0));
3394 }
3395 EXPORT_SYMBOL(vmalloc_32);
3396 
3397 /**
3398  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3399  * @size:	     allocation size
3400  *
3401  * The resulting memory area is 32bit addressable and zeroed so it can be
3402  * mapped to userspace without leaking data.
3403  *
3404  * Return: pointer to the allocated memory or %NULL on error
3405  */
3406 void *vmalloc_32_user(unsigned long size)
3407 {
3408 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3409 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3410 				    VM_USERMAP, NUMA_NO_NODE,
3411 				    __builtin_return_address(0));
3412 }
3413 EXPORT_SYMBOL(vmalloc_32_user);
3414 
3415 /*
3416  * small helper routine , copy contents to buf from addr.
3417  * If the page is not present, fill zero.
3418  */
3419 
3420 static int aligned_vread(char *buf, char *addr, unsigned long count)
3421 {
3422 	struct page *p;
3423 	int copied = 0;
3424 
3425 	while (count) {
3426 		unsigned long offset, length;
3427 
3428 		offset = offset_in_page(addr);
3429 		length = PAGE_SIZE - offset;
3430 		if (length > count)
3431 			length = count;
3432 		p = vmalloc_to_page(addr);
3433 		/*
3434 		 * To do safe access to this _mapped_ area, we need
3435 		 * lock. But adding lock here means that we need to add
3436 		 * overhead of vmalloc()/vfree() calls for this _debug_
3437 		 * interface, rarely used. Instead of that, we'll use
3438 		 * kmap() and get small overhead in this access function.
3439 		 */
3440 		if (p) {
3441 			/* We can expect USER0 is not used -- see vread() */
3442 			void *map = kmap_atomic(p);
3443 			memcpy(buf, map + offset, length);
3444 			kunmap_atomic(map);
3445 		} else
3446 			memset(buf, 0, length);
3447 
3448 		addr += length;
3449 		buf += length;
3450 		copied += length;
3451 		count -= length;
3452 	}
3453 	return copied;
3454 }
3455 
3456 /**
3457  * vread() - read vmalloc area in a safe way.
3458  * @buf:     buffer for reading data
3459  * @addr:    vm address.
3460  * @count:   number of bytes to be read.
3461  *
3462  * This function checks that addr is a valid vmalloc'ed area, and
3463  * copy data from that area to a given buffer. If the given memory range
3464  * of [addr...addr+count) includes some valid address, data is copied to
3465  * proper area of @buf. If there are memory holes, they'll be zero-filled.
3466  * IOREMAP area is treated as memory hole and no copy is done.
3467  *
3468  * If [addr...addr+count) doesn't includes any intersects with alive
3469  * vm_struct area, returns 0. @buf should be kernel's buffer.
3470  *
3471  * Note: In usual ops, vread() is never necessary because the caller
3472  * should know vmalloc() area is valid and can use memcpy().
3473  * This is for routines which have to access vmalloc area without
3474  * any information, as /proc/kcore.
3475  *
3476  * Return: number of bytes for which addr and buf should be increased
3477  * (same number as @count) or %0 if [addr...addr+count) doesn't
3478  * include any intersection with valid vmalloc area
3479  */
3480 long vread(char *buf, char *addr, unsigned long count)
3481 {
3482 	struct vmap_area *va;
3483 	struct vm_struct *vm;
3484 	char *vaddr, *buf_start = buf;
3485 	unsigned long buflen = count;
3486 	unsigned long n;
3487 
3488 	addr = kasan_reset_tag(addr);
3489 
3490 	/* Don't allow overflow */
3491 	if ((unsigned long) addr + count < count)
3492 		count = -(unsigned long) addr;
3493 
3494 	spin_lock(&vmap_area_lock);
3495 	va = find_vmap_area_exceed_addr((unsigned long)addr);
3496 	if (!va)
3497 		goto finished;
3498 
3499 	/* no intersects with alive vmap_area */
3500 	if ((unsigned long)addr + count <= va->va_start)
3501 		goto finished;
3502 
3503 	list_for_each_entry_from(va, &vmap_area_list, list) {
3504 		if (!count)
3505 			break;
3506 
3507 		if (!va->vm)
3508 			continue;
3509 
3510 		vm = va->vm;
3511 		vaddr = (char *) vm->addr;
3512 		if (addr >= vaddr + get_vm_area_size(vm))
3513 			continue;
3514 		while (addr < vaddr) {
3515 			if (count == 0)
3516 				goto finished;
3517 			*buf = '\0';
3518 			buf++;
3519 			addr++;
3520 			count--;
3521 		}
3522 		n = vaddr + get_vm_area_size(vm) - addr;
3523 		if (n > count)
3524 			n = count;
3525 		if (!(vm->flags & VM_IOREMAP))
3526 			aligned_vread(buf, addr, n);
3527 		else /* IOREMAP area is treated as memory hole */
3528 			memset(buf, 0, n);
3529 		buf += n;
3530 		addr += n;
3531 		count -= n;
3532 	}
3533 finished:
3534 	spin_unlock(&vmap_area_lock);
3535 
3536 	if (buf == buf_start)
3537 		return 0;
3538 	/* zero-fill memory holes */
3539 	if (buf != buf_start + buflen)
3540 		memset(buf, 0, buflen - (buf - buf_start));
3541 
3542 	return buflen;
3543 }
3544 
3545 /**
3546  * remap_vmalloc_range_partial - map vmalloc pages to userspace
3547  * @vma:		vma to cover
3548  * @uaddr:		target user address to start at
3549  * @kaddr:		virtual address of vmalloc kernel memory
3550  * @pgoff:		offset from @kaddr to start at
3551  * @size:		size of map area
3552  *
3553  * Returns:	0 for success, -Exxx on failure
3554  *
3555  * This function checks that @kaddr is a valid vmalloc'ed area,
3556  * and that it is big enough to cover the range starting at
3557  * @uaddr in @vma. Will return failure if that criteria isn't
3558  * met.
3559  *
3560  * Similar to remap_pfn_range() (see mm/memory.c)
3561  */
3562 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3563 				void *kaddr, unsigned long pgoff,
3564 				unsigned long size)
3565 {
3566 	struct vm_struct *area;
3567 	unsigned long off;
3568 	unsigned long end_index;
3569 
3570 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3571 		return -EINVAL;
3572 
3573 	size = PAGE_ALIGN(size);
3574 
3575 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3576 		return -EINVAL;
3577 
3578 	area = find_vm_area(kaddr);
3579 	if (!area)
3580 		return -EINVAL;
3581 
3582 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3583 		return -EINVAL;
3584 
3585 	if (check_add_overflow(size, off, &end_index) ||
3586 	    end_index > get_vm_area_size(area))
3587 		return -EINVAL;
3588 	kaddr += off;
3589 
3590 	do {
3591 		struct page *page = vmalloc_to_page(kaddr);
3592 		int ret;
3593 
3594 		ret = vm_insert_page(vma, uaddr, page);
3595 		if (ret)
3596 			return ret;
3597 
3598 		uaddr += PAGE_SIZE;
3599 		kaddr += PAGE_SIZE;
3600 		size -= PAGE_SIZE;
3601 	} while (size > 0);
3602 
3603 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3604 
3605 	return 0;
3606 }
3607 
3608 /**
3609  * remap_vmalloc_range - map vmalloc pages to userspace
3610  * @vma:		vma to cover (map full range of vma)
3611  * @addr:		vmalloc memory
3612  * @pgoff:		number of pages into addr before first page to map
3613  *
3614  * Returns:	0 for success, -Exxx on failure
3615  *
3616  * This function checks that addr is a valid vmalloc'ed area, and
3617  * that it is big enough to cover the vma. Will return failure if
3618  * that criteria isn't met.
3619  *
3620  * Similar to remap_pfn_range() (see mm/memory.c)
3621  */
3622 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3623 						unsigned long pgoff)
3624 {
3625 	return remap_vmalloc_range_partial(vma, vma->vm_start,
3626 					   addr, pgoff,
3627 					   vma->vm_end - vma->vm_start);
3628 }
3629 EXPORT_SYMBOL(remap_vmalloc_range);
3630 
3631 void free_vm_area(struct vm_struct *area)
3632 {
3633 	struct vm_struct *ret;
3634 	ret = remove_vm_area(area->addr);
3635 	BUG_ON(ret != area);
3636 	kfree(area);
3637 }
3638 EXPORT_SYMBOL_GPL(free_vm_area);
3639 
3640 #ifdef CONFIG_SMP
3641 static struct vmap_area *node_to_va(struct rb_node *n)
3642 {
3643 	return rb_entry_safe(n, struct vmap_area, rb_node);
3644 }
3645 
3646 /**
3647  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3648  * @addr: target address
3649  *
3650  * Returns: vmap_area if it is found. If there is no such area
3651  *   the first highest(reverse order) vmap_area is returned
3652  *   i.e. va->va_start < addr && va->va_end < addr or NULL
3653  *   if there are no any areas before @addr.
3654  */
3655 static struct vmap_area *
3656 pvm_find_va_enclose_addr(unsigned long addr)
3657 {
3658 	struct vmap_area *va, *tmp;
3659 	struct rb_node *n;
3660 
3661 	n = free_vmap_area_root.rb_node;
3662 	va = NULL;
3663 
3664 	while (n) {
3665 		tmp = rb_entry(n, struct vmap_area, rb_node);
3666 		if (tmp->va_start <= addr) {
3667 			va = tmp;
3668 			if (tmp->va_end >= addr)
3669 				break;
3670 
3671 			n = n->rb_right;
3672 		} else {
3673 			n = n->rb_left;
3674 		}
3675 	}
3676 
3677 	return va;
3678 }
3679 
3680 /**
3681  * pvm_determine_end_from_reverse - find the highest aligned address
3682  * of free block below VMALLOC_END
3683  * @va:
3684  *   in - the VA we start the search(reverse order);
3685  *   out - the VA with the highest aligned end address.
3686  * @align: alignment for required highest address
3687  *
3688  * Returns: determined end address within vmap_area
3689  */
3690 static unsigned long
3691 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3692 {
3693 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3694 	unsigned long addr;
3695 
3696 	if (likely(*va)) {
3697 		list_for_each_entry_from_reverse((*va),
3698 				&free_vmap_area_list, list) {
3699 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3700 			if ((*va)->va_start < addr)
3701 				return addr;
3702 		}
3703 	}
3704 
3705 	return 0;
3706 }
3707 
3708 /**
3709  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3710  * @offsets: array containing offset of each area
3711  * @sizes: array containing size of each area
3712  * @nr_vms: the number of areas to allocate
3713  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3714  *
3715  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3716  *	    vm_structs on success, %NULL on failure
3717  *
3718  * Percpu allocator wants to use congruent vm areas so that it can
3719  * maintain the offsets among percpu areas.  This function allocates
3720  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3721  * be scattered pretty far, distance between two areas easily going up
3722  * to gigabytes.  To avoid interacting with regular vmallocs, these
3723  * areas are allocated from top.
3724  *
3725  * Despite its complicated look, this allocator is rather simple. It
3726  * does everything top-down and scans free blocks from the end looking
3727  * for matching base. While scanning, if any of the areas do not fit the
3728  * base address is pulled down to fit the area. Scanning is repeated till
3729  * all the areas fit and then all necessary data structures are inserted
3730  * and the result is returned.
3731  */
3732 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3733 				     const size_t *sizes, int nr_vms,
3734 				     size_t align)
3735 {
3736 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3737 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3738 	struct vmap_area **vas, *va;
3739 	struct vm_struct **vms;
3740 	int area, area2, last_area, term_area;
3741 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
3742 	bool purged = false;
3743 	enum fit_type type;
3744 
3745 	/* verify parameters and allocate data structures */
3746 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3747 	for (last_area = 0, area = 0; area < nr_vms; area++) {
3748 		start = offsets[area];
3749 		end = start + sizes[area];
3750 
3751 		/* is everything aligned properly? */
3752 		BUG_ON(!IS_ALIGNED(offsets[area], align));
3753 		BUG_ON(!IS_ALIGNED(sizes[area], align));
3754 
3755 		/* detect the area with the highest address */
3756 		if (start > offsets[last_area])
3757 			last_area = area;
3758 
3759 		for (area2 = area + 1; area2 < nr_vms; area2++) {
3760 			unsigned long start2 = offsets[area2];
3761 			unsigned long end2 = start2 + sizes[area2];
3762 
3763 			BUG_ON(start2 < end && start < end2);
3764 		}
3765 	}
3766 	last_end = offsets[last_area] + sizes[last_area];
3767 
3768 	if (vmalloc_end - vmalloc_start < last_end) {
3769 		WARN_ON(true);
3770 		return NULL;
3771 	}
3772 
3773 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3774 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3775 	if (!vas || !vms)
3776 		goto err_free2;
3777 
3778 	for (area = 0; area < nr_vms; area++) {
3779 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3780 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3781 		if (!vas[area] || !vms[area])
3782 			goto err_free;
3783 	}
3784 retry:
3785 	spin_lock(&free_vmap_area_lock);
3786 
3787 	/* start scanning - we scan from the top, begin with the last area */
3788 	area = term_area = last_area;
3789 	start = offsets[area];
3790 	end = start + sizes[area];
3791 
3792 	va = pvm_find_va_enclose_addr(vmalloc_end);
3793 	base = pvm_determine_end_from_reverse(&va, align) - end;
3794 
3795 	while (true) {
3796 		/*
3797 		 * base might have underflowed, add last_end before
3798 		 * comparing.
3799 		 */
3800 		if (base + last_end < vmalloc_start + last_end)
3801 			goto overflow;
3802 
3803 		/*
3804 		 * Fitting base has not been found.
3805 		 */
3806 		if (va == NULL)
3807 			goto overflow;
3808 
3809 		/*
3810 		 * If required width exceeds current VA block, move
3811 		 * base downwards and then recheck.
3812 		 */
3813 		if (base + end > va->va_end) {
3814 			base = pvm_determine_end_from_reverse(&va, align) - end;
3815 			term_area = area;
3816 			continue;
3817 		}
3818 
3819 		/*
3820 		 * If this VA does not fit, move base downwards and recheck.
3821 		 */
3822 		if (base + start < va->va_start) {
3823 			va = node_to_va(rb_prev(&va->rb_node));
3824 			base = pvm_determine_end_from_reverse(&va, align) - end;
3825 			term_area = area;
3826 			continue;
3827 		}
3828 
3829 		/*
3830 		 * This area fits, move on to the previous one.  If
3831 		 * the previous one is the terminal one, we're done.
3832 		 */
3833 		area = (area + nr_vms - 1) % nr_vms;
3834 		if (area == term_area)
3835 			break;
3836 
3837 		start = offsets[area];
3838 		end = start + sizes[area];
3839 		va = pvm_find_va_enclose_addr(base + end);
3840 	}
3841 
3842 	/* we've found a fitting base, insert all va's */
3843 	for (area = 0; area < nr_vms; area++) {
3844 		int ret;
3845 
3846 		start = base + offsets[area];
3847 		size = sizes[area];
3848 
3849 		va = pvm_find_va_enclose_addr(start);
3850 		if (WARN_ON_ONCE(va == NULL))
3851 			/* It is a BUG(), but trigger recovery instead. */
3852 			goto recovery;
3853 
3854 		type = classify_va_fit_type(va, start, size);
3855 		if (WARN_ON_ONCE(type == NOTHING_FIT))
3856 			/* It is a BUG(), but trigger recovery instead. */
3857 			goto recovery;
3858 
3859 		ret = adjust_va_to_fit_type(va, start, size, type);
3860 		if (unlikely(ret))
3861 			goto recovery;
3862 
3863 		/* Allocated area. */
3864 		va = vas[area];
3865 		va->va_start = start;
3866 		va->va_end = start + size;
3867 	}
3868 
3869 	spin_unlock(&free_vmap_area_lock);
3870 
3871 	/* populate the kasan shadow space */
3872 	for (area = 0; area < nr_vms; area++) {
3873 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3874 			goto err_free_shadow;
3875 	}
3876 
3877 	/* insert all vm's */
3878 	spin_lock(&vmap_area_lock);
3879 	for (area = 0; area < nr_vms; area++) {
3880 		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3881 
3882 		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3883 				 pcpu_get_vm_areas);
3884 	}
3885 	spin_unlock(&vmap_area_lock);
3886 
3887 	/*
3888 	 * Mark allocated areas as accessible. Do it now as a best-effort
3889 	 * approach, as they can be mapped outside of vmalloc code.
3890 	 * With hardware tag-based KASAN, marking is skipped for
3891 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3892 	 */
3893 	for (area = 0; area < nr_vms; area++)
3894 		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
3895 				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
3896 
3897 	kfree(vas);
3898 	return vms;
3899 
3900 recovery:
3901 	/*
3902 	 * Remove previously allocated areas. There is no
3903 	 * need in removing these areas from the busy tree,
3904 	 * because they are inserted only on the final step
3905 	 * and when pcpu_get_vm_areas() is success.
3906 	 */
3907 	while (area--) {
3908 		orig_start = vas[area]->va_start;
3909 		orig_end = vas[area]->va_end;
3910 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3911 				&free_vmap_area_list);
3912 		if (va)
3913 			kasan_release_vmalloc(orig_start, orig_end,
3914 				va->va_start, va->va_end);
3915 		vas[area] = NULL;
3916 	}
3917 
3918 overflow:
3919 	spin_unlock(&free_vmap_area_lock);
3920 	if (!purged) {
3921 		purge_vmap_area_lazy();
3922 		purged = true;
3923 
3924 		/* Before "retry", check if we recover. */
3925 		for (area = 0; area < nr_vms; area++) {
3926 			if (vas[area])
3927 				continue;
3928 
3929 			vas[area] = kmem_cache_zalloc(
3930 				vmap_area_cachep, GFP_KERNEL);
3931 			if (!vas[area])
3932 				goto err_free;
3933 		}
3934 
3935 		goto retry;
3936 	}
3937 
3938 err_free:
3939 	for (area = 0; area < nr_vms; area++) {
3940 		if (vas[area])
3941 			kmem_cache_free(vmap_area_cachep, vas[area]);
3942 
3943 		kfree(vms[area]);
3944 	}
3945 err_free2:
3946 	kfree(vas);
3947 	kfree(vms);
3948 	return NULL;
3949 
3950 err_free_shadow:
3951 	spin_lock(&free_vmap_area_lock);
3952 	/*
3953 	 * We release all the vmalloc shadows, even the ones for regions that
3954 	 * hadn't been successfully added. This relies on kasan_release_vmalloc
3955 	 * being able to tolerate this case.
3956 	 */
3957 	for (area = 0; area < nr_vms; area++) {
3958 		orig_start = vas[area]->va_start;
3959 		orig_end = vas[area]->va_end;
3960 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3961 				&free_vmap_area_list);
3962 		if (va)
3963 			kasan_release_vmalloc(orig_start, orig_end,
3964 				va->va_start, va->va_end);
3965 		vas[area] = NULL;
3966 		kfree(vms[area]);
3967 	}
3968 	spin_unlock(&free_vmap_area_lock);
3969 	kfree(vas);
3970 	kfree(vms);
3971 	return NULL;
3972 }
3973 
3974 /**
3975  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3976  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3977  * @nr_vms: the number of allocated areas
3978  *
3979  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3980  */
3981 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3982 {
3983 	int i;
3984 
3985 	for (i = 0; i < nr_vms; i++)
3986 		free_vm_area(vms[i]);
3987 	kfree(vms);
3988 }
3989 #endif	/* CONFIG_SMP */
3990 
3991 #ifdef CONFIG_PRINTK
3992 bool vmalloc_dump_obj(void *object)
3993 {
3994 	struct vm_struct *vm;
3995 	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
3996 
3997 	vm = find_vm_area(objp);
3998 	if (!vm)
3999 		return false;
4000 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
4001 		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
4002 	return true;
4003 }
4004 #endif
4005 
4006 #ifdef CONFIG_PROC_FS
4007 static void *s_start(struct seq_file *m, loff_t *pos)
4008 	__acquires(&vmap_purge_lock)
4009 	__acquires(&vmap_area_lock)
4010 {
4011 	mutex_lock(&vmap_purge_lock);
4012 	spin_lock(&vmap_area_lock);
4013 
4014 	return seq_list_start(&vmap_area_list, *pos);
4015 }
4016 
4017 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4018 {
4019 	return seq_list_next(p, &vmap_area_list, pos);
4020 }
4021 
4022 static void s_stop(struct seq_file *m, void *p)
4023 	__releases(&vmap_area_lock)
4024 	__releases(&vmap_purge_lock)
4025 {
4026 	spin_unlock(&vmap_area_lock);
4027 	mutex_unlock(&vmap_purge_lock);
4028 }
4029 
4030 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
4031 {
4032 	if (IS_ENABLED(CONFIG_NUMA)) {
4033 		unsigned int nr, *counters = m->private;
4034 		unsigned int step = 1U << vm_area_page_order(v);
4035 
4036 		if (!counters)
4037 			return;
4038 
4039 		if (v->flags & VM_UNINITIALIZED)
4040 			return;
4041 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4042 		smp_rmb();
4043 
4044 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
4045 
4046 		for (nr = 0; nr < v->nr_pages; nr += step)
4047 			counters[page_to_nid(v->pages[nr])] += step;
4048 		for_each_node_state(nr, N_HIGH_MEMORY)
4049 			if (counters[nr])
4050 				seq_printf(m, " N%u=%u", nr, counters[nr]);
4051 	}
4052 }
4053 
4054 static void show_purge_info(struct seq_file *m)
4055 {
4056 	struct vmap_area *va;
4057 
4058 	spin_lock(&purge_vmap_area_lock);
4059 	list_for_each_entry(va, &purge_vmap_area_list, list) {
4060 		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4061 			(void *)va->va_start, (void *)va->va_end,
4062 			va->va_end - va->va_start);
4063 	}
4064 	spin_unlock(&purge_vmap_area_lock);
4065 }
4066 
4067 static int s_show(struct seq_file *m, void *p)
4068 {
4069 	struct vmap_area *va;
4070 	struct vm_struct *v;
4071 
4072 	va = list_entry(p, struct vmap_area, list);
4073 
4074 	/*
4075 	 * s_show can encounter race with remove_vm_area, !vm on behalf
4076 	 * of vmap area is being tear down or vm_map_ram allocation.
4077 	 */
4078 	if (!va->vm) {
4079 		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
4080 			(void *)va->va_start, (void *)va->va_end,
4081 			va->va_end - va->va_start);
4082 
4083 		goto final;
4084 	}
4085 
4086 	v = va->vm;
4087 
4088 	seq_printf(m, "0x%pK-0x%pK %7ld",
4089 		v->addr, v->addr + v->size, v->size);
4090 
4091 	if (v->caller)
4092 		seq_printf(m, " %pS", v->caller);
4093 
4094 	if (v->nr_pages)
4095 		seq_printf(m, " pages=%d", v->nr_pages);
4096 
4097 	if (v->phys_addr)
4098 		seq_printf(m, " phys=%pa", &v->phys_addr);
4099 
4100 	if (v->flags & VM_IOREMAP)
4101 		seq_puts(m, " ioremap");
4102 
4103 	if (v->flags & VM_ALLOC)
4104 		seq_puts(m, " vmalloc");
4105 
4106 	if (v->flags & VM_MAP)
4107 		seq_puts(m, " vmap");
4108 
4109 	if (v->flags & VM_USERMAP)
4110 		seq_puts(m, " user");
4111 
4112 	if (v->flags & VM_DMA_COHERENT)
4113 		seq_puts(m, " dma-coherent");
4114 
4115 	if (is_vmalloc_addr(v->pages))
4116 		seq_puts(m, " vpages");
4117 
4118 	show_numa_info(m, v);
4119 	seq_putc(m, '\n');
4120 
4121 	/*
4122 	 * As a final step, dump "unpurged" areas.
4123 	 */
4124 final:
4125 	if (list_is_last(&va->list, &vmap_area_list))
4126 		show_purge_info(m);
4127 
4128 	return 0;
4129 }
4130 
4131 static const struct seq_operations vmalloc_op = {
4132 	.start = s_start,
4133 	.next = s_next,
4134 	.stop = s_stop,
4135 	.show = s_show,
4136 };
4137 
4138 static int __init proc_vmalloc_init(void)
4139 {
4140 	if (IS_ENABLED(CONFIG_NUMA))
4141 		proc_create_seq_private("vmallocinfo", 0400, NULL,
4142 				&vmalloc_op,
4143 				nr_node_ids * sizeof(unsigned int), NULL);
4144 	else
4145 		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
4146 	return 0;
4147 }
4148 module_init(proc_vmalloc_init);
4149 
4150 #endif
4151