xref: /openbmc/linux/mm/vmalloc.c (revision 935d4f0c6dc8b3533e6e39346de7389a84490178)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *  Copyright (C) 1993  Linus Torvalds
41da177e4SLinus Torvalds  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
51da177e4SLinus Torvalds  *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
61da177e4SLinus Torvalds  *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
7930fc45aSChristoph Lameter  *  Numa awareness, Christoph Lameter, SGI, June 2005
8d758ffe6SUladzislau Rezki (Sony)  *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
91da177e4SLinus Torvalds  */
101da177e4SLinus Torvalds 
11db64fe02SNick Piggin #include <linux/vmalloc.h>
121da177e4SLinus Torvalds #include <linux/mm.h>
131da177e4SLinus Torvalds #include <linux/module.h>
141da177e4SLinus Torvalds #include <linux/highmem.h>
15c3edc401SIngo Molnar #include <linux/sched/signal.h>
161da177e4SLinus Torvalds #include <linux/slab.h>
171da177e4SLinus Torvalds #include <linux/spinlock.h>
181da177e4SLinus Torvalds #include <linux/interrupt.h>
195f6a6a9cSAlexey Dobriyan #include <linux/proc_fs.h>
20a10aa579SChristoph Lameter #include <linux/seq_file.h>
21868b104dSRick Edgecombe #include <linux/set_memory.h>
223ac7fe5aSThomas Gleixner #include <linux/debugobjects.h>
2323016969SChristoph Lameter #include <linux/kallsyms.h>
24db64fe02SNick Piggin #include <linux/list.h>
254da56b99SChris Wilson #include <linux/notifier.h>
26db64fe02SNick Piggin #include <linux/rbtree.h>
270f14599cSMatthew Wilcox (Oracle) #include <linux/xarray.h>
285da96bddSMel Gorman #include <linux/io.h>
29db64fe02SNick Piggin #include <linux/rcupdate.h>
30f0aa6617STejun Heo #include <linux/pfn.h>
3189219d37SCatalin Marinas #include <linux/kmemleak.h>
3260063497SArun Sharma #include <linux/atomic.h>
333b32123dSGideon Israel Dsouza #include <linux/compiler.h>
344e5aa1f4SShakeel Butt #include <linux/memcontrol.h>
3532fcfd40SAl Viro #include <linux/llist.h>
364c91c07cSLorenzo Stoakes #include <linux/uio.h>
370f616be1SToshi Kani #include <linux/bitops.h>
3868ad4a33SUladzislau Rezki (Sony) #include <linux/rbtree_augmented.h>
39bdebd6a2SJann Horn #include <linux/overflow.h>
40c0eb315aSNicholas Piggin #include <linux/pgtable.h>
41f7ee1f13SChristophe Leroy #include <linux/hugetlb.h>
42451769ebSMichal Hocko #include <linux/sched/mm.h>
431da177e4SLinus Torvalds #include <asm/tlbflush.h>
442dca6999SDavid Miller #include <asm/shmparam.h>
451da177e4SLinus Torvalds 
46cf243da6SUladzislau Rezki (Sony) #define CREATE_TRACE_POINTS
47cf243da6SUladzislau Rezki (Sony) #include <trace/events/vmalloc.h>
48cf243da6SUladzislau Rezki (Sony) 
49dd56b046SMel Gorman #include "internal.h"
502a681cfaSJoerg Roedel #include "pgalloc-track.h"
51dd56b046SMel Gorman 
5282a70ce0SChristoph Hellwig #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
5382a70ce0SChristoph Hellwig static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
5482a70ce0SChristoph Hellwig 
5582a70ce0SChristoph Hellwig static int __init set_nohugeiomap(char *str)
5682a70ce0SChristoph Hellwig {
5782a70ce0SChristoph Hellwig 	ioremap_max_page_shift = PAGE_SHIFT;
5882a70ce0SChristoph Hellwig 	return 0;
5982a70ce0SChristoph Hellwig }
6082a70ce0SChristoph Hellwig early_param("nohugeiomap", set_nohugeiomap);
6182a70ce0SChristoph Hellwig #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
6282a70ce0SChristoph Hellwig static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
6382a70ce0SChristoph Hellwig #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
6482a70ce0SChristoph Hellwig 
65121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
66121e6f32SNicholas Piggin static bool __ro_after_init vmap_allow_huge = true;
67121e6f32SNicholas Piggin 
68121e6f32SNicholas Piggin static int __init set_nohugevmalloc(char *str)
69121e6f32SNicholas Piggin {
70121e6f32SNicholas Piggin 	vmap_allow_huge = false;
71121e6f32SNicholas Piggin 	return 0;
72121e6f32SNicholas Piggin }
73121e6f32SNicholas Piggin early_param("nohugevmalloc", set_nohugevmalloc);
74121e6f32SNicholas Piggin #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
75121e6f32SNicholas Piggin static const bool vmap_allow_huge = false;
76121e6f32SNicholas Piggin #endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
77121e6f32SNicholas Piggin 
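/*
 * For example, booting with "nohugeiomap" or "nohugevmalloc" on the
 * kernel command line forces ioremap and vmalloc, respectively, back to
 * PAGE_SIZE mappings even when the architecture supports the huge
 * variants.
 */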
78186525bdSIngo Molnar bool is_vmalloc_addr(const void *x)
79186525bdSIngo Molnar {
804aff1dc4SAndrey Konovalov 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
81186525bdSIngo Molnar 
82186525bdSIngo Molnar 	return addr >= VMALLOC_START && addr < VMALLOC_END;
83186525bdSIngo Molnar }
84186525bdSIngo Molnar EXPORT_SYMBOL(is_vmalloc_addr);
85186525bdSIngo Molnar 
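/*
 * A minimal usage sketch (illustrative only; example_free_buf() is a
 * hypothetical helper, not part of this file): because is_vmalloc_addr()
 * only tests the address range, it can select the matching free primitive
 * for an opaque buffer pointer, which is essentially what kvfree() does.
 */
static __maybe_unused void example_free_buf(const void *buf)
{
	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		kfree(buf);
}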
8632fcfd40SAl Viro struct vfree_deferred {
8732fcfd40SAl Viro 	struct llist_head list;
8832fcfd40SAl Viro 	struct work_struct wq;
8932fcfd40SAl Viro };
9032fcfd40SAl Viro static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
9132fcfd40SAl Viro 
92db64fe02SNick Piggin /*** Page table manipulation functions ***/
935e9e3d77SNicholas Piggin static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
945e9e3d77SNicholas Piggin 			phys_addr_t phys_addr, pgprot_t prot,
95f7ee1f13SChristophe Leroy 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
965e9e3d77SNicholas Piggin {
975e9e3d77SNicholas Piggin 	pte_t *pte;
985e9e3d77SNicholas Piggin 	u64 pfn;
99f7ee1f13SChristophe Leroy 	unsigned long size = PAGE_SIZE;
1005e9e3d77SNicholas Piggin 
1015e9e3d77SNicholas Piggin 	pfn = phys_addr >> PAGE_SHIFT;
1025e9e3d77SNicholas Piggin 	pte = pte_alloc_kernel_track(pmd, addr, mask);
1035e9e3d77SNicholas Piggin 	if (!pte)
1045e9e3d77SNicholas Piggin 		return -ENOMEM;
1055e9e3d77SNicholas Piggin 	do {
106c33c7948SRyan Roberts 		BUG_ON(!pte_none(ptep_get(pte)));
107f7ee1f13SChristophe Leroy 
108f7ee1f13SChristophe Leroy #ifdef CONFIG_HUGETLB_PAGE
109f7ee1f13SChristophe Leroy 		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
110f7ee1f13SChristophe Leroy 		if (size != PAGE_SIZE) {
111f7ee1f13SChristophe Leroy 			pte_t entry = pfn_pte(pfn, prot);
112f7ee1f13SChristophe Leroy 
113f7ee1f13SChristophe Leroy 			entry = arch_make_huge_pte(entry, ilog2(size), 0);
114*935d4f0cSRyan Roberts 			set_huge_pte_at(&init_mm, addr, pte, entry, size);
115f7ee1f13SChristophe Leroy 			pfn += PFN_DOWN(size);
116f7ee1f13SChristophe Leroy 			continue;
117f7ee1f13SChristophe Leroy 		}
118f7ee1f13SChristophe Leroy #endif
1195e9e3d77SNicholas Piggin 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
1205e9e3d77SNicholas Piggin 		pfn++;
121f7ee1f13SChristophe Leroy 	} while (pte += PFN_DOWN(size), addr += size, addr != end);
1225e9e3d77SNicholas Piggin 	*mask |= PGTBL_PTE_MODIFIED;
1235e9e3d77SNicholas Piggin 	return 0;
1245e9e3d77SNicholas Piggin }
1255e9e3d77SNicholas Piggin 
1265e9e3d77SNicholas Piggin static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
1275e9e3d77SNicholas Piggin 			phys_addr_t phys_addr, pgprot_t prot,
1285e9e3d77SNicholas Piggin 			unsigned int max_page_shift)
1295e9e3d77SNicholas Piggin {
1305e9e3d77SNicholas Piggin 	if (max_page_shift < PMD_SHIFT)
1315e9e3d77SNicholas Piggin 		return 0;
1325e9e3d77SNicholas Piggin 
1335e9e3d77SNicholas Piggin 	if (!arch_vmap_pmd_supported(prot))
1345e9e3d77SNicholas Piggin 		return 0;
1355e9e3d77SNicholas Piggin 
1365e9e3d77SNicholas Piggin 	if ((end - addr) != PMD_SIZE)
1375e9e3d77SNicholas Piggin 		return 0;
1385e9e3d77SNicholas Piggin 
1395e9e3d77SNicholas Piggin 	if (!IS_ALIGNED(addr, PMD_SIZE))
1405e9e3d77SNicholas Piggin 		return 0;
1415e9e3d77SNicholas Piggin 
1425e9e3d77SNicholas Piggin 	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
1435e9e3d77SNicholas Piggin 		return 0;
1445e9e3d77SNicholas Piggin 
1455e9e3d77SNicholas Piggin 	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
1465e9e3d77SNicholas Piggin 		return 0;
1475e9e3d77SNicholas Piggin 
1485e9e3d77SNicholas Piggin 	return pmd_set_huge(pmd, phys_addr, prot);
1495e9e3d77SNicholas Piggin }
1505e9e3d77SNicholas Piggin 
1515e9e3d77SNicholas Piggin static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
1525e9e3d77SNicholas Piggin 			phys_addr_t phys_addr, pgprot_t prot,
1535e9e3d77SNicholas Piggin 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
1545e9e3d77SNicholas Piggin {
1555e9e3d77SNicholas Piggin 	pmd_t *pmd;
1565e9e3d77SNicholas Piggin 	unsigned long next;
1575e9e3d77SNicholas Piggin 
1585e9e3d77SNicholas Piggin 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
1595e9e3d77SNicholas Piggin 	if (!pmd)
1605e9e3d77SNicholas Piggin 		return -ENOMEM;
1615e9e3d77SNicholas Piggin 	do {
1625e9e3d77SNicholas Piggin 		next = pmd_addr_end(addr, end);
1635e9e3d77SNicholas Piggin 
1645e9e3d77SNicholas Piggin 		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
1655e9e3d77SNicholas Piggin 					max_page_shift)) {
1665e9e3d77SNicholas Piggin 			*mask |= PGTBL_PMD_MODIFIED;
1675e9e3d77SNicholas Piggin 			continue;
1685e9e3d77SNicholas Piggin 		}
1695e9e3d77SNicholas Piggin 
170f7ee1f13SChristophe Leroy 		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
1715e9e3d77SNicholas Piggin 			return -ENOMEM;
1725e9e3d77SNicholas Piggin 	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
1735e9e3d77SNicholas Piggin 	return 0;
1745e9e3d77SNicholas Piggin }
1755e9e3d77SNicholas Piggin 
1765e9e3d77SNicholas Piggin static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
1775e9e3d77SNicholas Piggin 			phys_addr_t phys_addr, pgprot_t prot,
1785e9e3d77SNicholas Piggin 			unsigned int max_page_shift)
1795e9e3d77SNicholas Piggin {
1805e9e3d77SNicholas Piggin 	if (max_page_shift < PUD_SHIFT)
1815e9e3d77SNicholas Piggin 		return 0;
1825e9e3d77SNicholas Piggin 
1835e9e3d77SNicholas Piggin 	if (!arch_vmap_pud_supported(prot))
1845e9e3d77SNicholas Piggin 		return 0;
1855e9e3d77SNicholas Piggin 
1865e9e3d77SNicholas Piggin 	if ((end - addr) != PUD_SIZE)
1875e9e3d77SNicholas Piggin 		return 0;
1885e9e3d77SNicholas Piggin 
1895e9e3d77SNicholas Piggin 	if (!IS_ALIGNED(addr, PUD_SIZE))
1905e9e3d77SNicholas Piggin 		return 0;
1915e9e3d77SNicholas Piggin 
1925e9e3d77SNicholas Piggin 	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
1935e9e3d77SNicholas Piggin 		return 0;
1945e9e3d77SNicholas Piggin 
1955e9e3d77SNicholas Piggin 	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
1965e9e3d77SNicholas Piggin 		return 0;
1975e9e3d77SNicholas Piggin 
1985e9e3d77SNicholas Piggin 	return pud_set_huge(pud, phys_addr, prot);
1995e9e3d77SNicholas Piggin }
2005e9e3d77SNicholas Piggin 
2015e9e3d77SNicholas Piggin static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
2025e9e3d77SNicholas Piggin 			phys_addr_t phys_addr, pgprot_t prot,
2035e9e3d77SNicholas Piggin 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
2045e9e3d77SNicholas Piggin {
2055e9e3d77SNicholas Piggin 	pud_t *pud;
2065e9e3d77SNicholas Piggin 	unsigned long next;
2075e9e3d77SNicholas Piggin 
2085e9e3d77SNicholas Piggin 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
2095e9e3d77SNicholas Piggin 	if (!pud)
2105e9e3d77SNicholas Piggin 		return -ENOMEM;
2115e9e3d77SNicholas Piggin 	do {
2125e9e3d77SNicholas Piggin 		next = pud_addr_end(addr, end);
2135e9e3d77SNicholas Piggin 
2145e9e3d77SNicholas Piggin 		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
2155e9e3d77SNicholas Piggin 					max_page_shift)) {
2165e9e3d77SNicholas Piggin 			*mask |= PGTBL_PUD_MODIFIED;
2175e9e3d77SNicholas Piggin 			continue;
2185e9e3d77SNicholas Piggin 		}
2195e9e3d77SNicholas Piggin 
2205e9e3d77SNicholas Piggin 		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
2215e9e3d77SNicholas Piggin 					max_page_shift, mask))
2225e9e3d77SNicholas Piggin 			return -ENOMEM;
2235e9e3d77SNicholas Piggin 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
2245e9e3d77SNicholas Piggin 	return 0;
2255e9e3d77SNicholas Piggin }
2265e9e3d77SNicholas Piggin 
2275e9e3d77SNicholas Piggin static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
2285e9e3d77SNicholas Piggin 			phys_addr_t phys_addr, pgprot_t prot,
2295e9e3d77SNicholas Piggin 			unsigned int max_page_shift)
2305e9e3d77SNicholas Piggin {
2315e9e3d77SNicholas Piggin 	if (max_page_shift < P4D_SHIFT)
2325e9e3d77SNicholas Piggin 		return 0;
2335e9e3d77SNicholas Piggin 
2345e9e3d77SNicholas Piggin 	if (!arch_vmap_p4d_supported(prot))
2355e9e3d77SNicholas Piggin 		return 0;
2365e9e3d77SNicholas Piggin 
2375e9e3d77SNicholas Piggin 	if ((end - addr) != P4D_SIZE)
2385e9e3d77SNicholas Piggin 		return 0;
2395e9e3d77SNicholas Piggin 
2405e9e3d77SNicholas Piggin 	if (!IS_ALIGNED(addr, P4D_SIZE))
2415e9e3d77SNicholas Piggin 		return 0;
2425e9e3d77SNicholas Piggin 
2435e9e3d77SNicholas Piggin 	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
2445e9e3d77SNicholas Piggin 		return 0;
2455e9e3d77SNicholas Piggin 
2465e9e3d77SNicholas Piggin 	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
2475e9e3d77SNicholas Piggin 		return 0;
2485e9e3d77SNicholas Piggin 
2495e9e3d77SNicholas Piggin 	return p4d_set_huge(p4d, phys_addr, prot);
2505e9e3d77SNicholas Piggin }
2515e9e3d77SNicholas Piggin 
2525e9e3d77SNicholas Piggin static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
2535e9e3d77SNicholas Piggin 			phys_addr_t phys_addr, pgprot_t prot,
2545e9e3d77SNicholas Piggin 			unsigned int max_page_shift, pgtbl_mod_mask *mask)
2555e9e3d77SNicholas Piggin {
2565e9e3d77SNicholas Piggin 	p4d_t *p4d;
2575e9e3d77SNicholas Piggin 	unsigned long next;
2585e9e3d77SNicholas Piggin 
2595e9e3d77SNicholas Piggin 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
2605e9e3d77SNicholas Piggin 	if (!p4d)
2615e9e3d77SNicholas Piggin 		return -ENOMEM;
2625e9e3d77SNicholas Piggin 	do {
2635e9e3d77SNicholas Piggin 		next = p4d_addr_end(addr, end);
2645e9e3d77SNicholas Piggin 
2655e9e3d77SNicholas Piggin 		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
2665e9e3d77SNicholas Piggin 					max_page_shift)) {
2675e9e3d77SNicholas Piggin 			*mask |= PGTBL_P4D_MODIFIED;
2685e9e3d77SNicholas Piggin 			continue;
2695e9e3d77SNicholas Piggin 		}
2705e9e3d77SNicholas Piggin 
2715e9e3d77SNicholas Piggin 		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
2725e9e3d77SNicholas Piggin 					max_page_shift, mask))
2735e9e3d77SNicholas Piggin 			return -ENOMEM;
2745e9e3d77SNicholas Piggin 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
2755e9e3d77SNicholas Piggin 	return 0;
2765e9e3d77SNicholas Piggin }
2775e9e3d77SNicholas Piggin 
2785d87510dSNicholas Piggin static int vmap_range_noflush(unsigned long addr, unsigned long end,
2795e9e3d77SNicholas Piggin 			phys_addr_t phys_addr, pgprot_t prot,
2805e9e3d77SNicholas Piggin 			unsigned int max_page_shift)
2815e9e3d77SNicholas Piggin {
2825e9e3d77SNicholas Piggin 	pgd_t *pgd;
2835e9e3d77SNicholas Piggin 	unsigned long start;
2845e9e3d77SNicholas Piggin 	unsigned long next;
2855e9e3d77SNicholas Piggin 	int err;
2865e9e3d77SNicholas Piggin 	pgtbl_mod_mask mask = 0;
2875e9e3d77SNicholas Piggin 
2885e9e3d77SNicholas Piggin 	might_sleep();
2895e9e3d77SNicholas Piggin 	BUG_ON(addr >= end);
2905e9e3d77SNicholas Piggin 
2915e9e3d77SNicholas Piggin 	start = addr;
2925e9e3d77SNicholas Piggin 	pgd = pgd_offset_k(addr);
2935e9e3d77SNicholas Piggin 	do {
2945e9e3d77SNicholas Piggin 		next = pgd_addr_end(addr, end);
2955e9e3d77SNicholas Piggin 		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
2965e9e3d77SNicholas Piggin 					max_page_shift, &mask);
2975e9e3d77SNicholas Piggin 		if (err)
2985e9e3d77SNicholas Piggin 			break;
2995e9e3d77SNicholas Piggin 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
3005e9e3d77SNicholas Piggin 
3015e9e3d77SNicholas Piggin 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
3025e9e3d77SNicholas Piggin 		arch_sync_kernel_mappings(start, end);
3035e9e3d77SNicholas Piggin 
3045e9e3d77SNicholas Piggin 	return err;
3055e9e3d77SNicholas Piggin }
306b221385bSAdrian Bunk 
30782a70ce0SChristoph Hellwig int ioremap_page_range(unsigned long addr, unsigned long end,
30882a70ce0SChristoph Hellwig 		phys_addr_t phys_addr, pgprot_t prot)
3095d87510dSNicholas Piggin {
3105d87510dSNicholas Piggin 	int err;
3115d87510dSNicholas Piggin 
3128491502fSChristoph Hellwig 	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
31382a70ce0SChristoph Hellwig 				 ioremap_max_page_shift);
3145d87510dSNicholas Piggin 	flush_cache_vmap(addr, end);
315b073d7f8SAlexander Potapenko 	if (!err)
316fdea03e1SAlexander Potapenko 		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
317b073d7f8SAlexander Potapenko 					       ioremap_max_page_shift);
3185d87510dSNicholas Piggin 	return err;
3195d87510dSNicholas Piggin }
3205d87510dSNicholas Piggin 
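/*
 * A minimal sketch of a caller (illustrative; example_ioremap() is a
 * hypothetical helper mirroring the generic ioremap wrapper): a vmap area
 * is reserved first, then the physical range is mapped into it with
 * ioremap_page_range(). Assumes @phys_addr and @size are page-aligned.
 */
static __maybe_unused void __iomem *example_ioremap(phys_addr_t phys_addr,
				unsigned long size, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long vaddr;

	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)vaddr;
}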
3212ba3e694SJoerg Roedel static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
3222ba3e694SJoerg Roedel 			     pgtbl_mod_mask *mask)
3231da177e4SLinus Torvalds {
3241da177e4SLinus Torvalds 	pte_t *pte;
3251da177e4SLinus Torvalds 
3261da177e4SLinus Torvalds 	pte = pte_offset_kernel(pmd, addr);
3271da177e4SLinus Torvalds 	do {
3281da177e4SLinus Torvalds 		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
3291da177e4SLinus Torvalds 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
3301da177e4SLinus Torvalds 	} while (pte++, addr += PAGE_SIZE, addr != end);
3312ba3e694SJoerg Roedel 	*mask |= PGTBL_PTE_MODIFIED;
3321da177e4SLinus Torvalds }
3331da177e4SLinus Torvalds 
3342ba3e694SJoerg Roedel static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
3352ba3e694SJoerg Roedel 			     pgtbl_mod_mask *mask)
3361da177e4SLinus Torvalds {
3371da177e4SLinus Torvalds 	pmd_t *pmd;
3381da177e4SLinus Torvalds 	unsigned long next;
3392ba3e694SJoerg Roedel 	int cleared;
3401da177e4SLinus Torvalds 
3411da177e4SLinus Torvalds 	pmd = pmd_offset(pud, addr);
3421da177e4SLinus Torvalds 	do {
3431da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
3442ba3e694SJoerg Roedel 
3452ba3e694SJoerg Roedel 		cleared = pmd_clear_huge(pmd);
3462ba3e694SJoerg Roedel 		if (cleared || pmd_bad(*pmd))
3472ba3e694SJoerg Roedel 			*mask |= PGTBL_PMD_MODIFIED;
3482ba3e694SJoerg Roedel 
3492ba3e694SJoerg Roedel 		if (cleared)
350b9820d8fSToshi Kani 			continue;
3511da177e4SLinus Torvalds 		if (pmd_none_or_clear_bad(pmd))
3521da177e4SLinus Torvalds 			continue;
3532ba3e694SJoerg Roedel 		vunmap_pte_range(pmd, addr, next, mask);
354e47110e9SAneesh Kumar K.V 
355e47110e9SAneesh Kumar K.V 		cond_resched();
3561da177e4SLinus Torvalds 	} while (pmd++, addr = next, addr != end);
3571da177e4SLinus Torvalds }
3581da177e4SLinus Torvalds 
3592ba3e694SJoerg Roedel static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
3602ba3e694SJoerg Roedel 			     pgtbl_mod_mask *mask)
3611da177e4SLinus Torvalds {
3621da177e4SLinus Torvalds 	pud_t *pud;
3631da177e4SLinus Torvalds 	unsigned long next;
3642ba3e694SJoerg Roedel 	int cleared;
3651da177e4SLinus Torvalds 
366c2febafcSKirill A. Shutemov 	pud = pud_offset(p4d, addr);
3671da177e4SLinus Torvalds 	do {
3681da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
3692ba3e694SJoerg Roedel 
3702ba3e694SJoerg Roedel 		cleared = pud_clear_huge(pud);
3712ba3e694SJoerg Roedel 		if (cleared || pud_bad(*pud))
3722ba3e694SJoerg Roedel 			*mask |= PGTBL_PUD_MODIFIED;
3732ba3e694SJoerg Roedel 
3742ba3e694SJoerg Roedel 		if (cleared)
375b9820d8fSToshi Kani 			continue;
3761da177e4SLinus Torvalds 		if (pud_none_or_clear_bad(pud))
3771da177e4SLinus Torvalds 			continue;
3782ba3e694SJoerg Roedel 		vunmap_pmd_range(pud, addr, next, mask);
3791da177e4SLinus Torvalds 	} while (pud++, addr = next, addr != end);
3801da177e4SLinus Torvalds }
3811da177e4SLinus Torvalds 
3822ba3e694SJoerg Roedel static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
3832ba3e694SJoerg Roedel 			     pgtbl_mod_mask *mask)
384c2febafcSKirill A. Shutemov {
385c2febafcSKirill A. Shutemov 	p4d_t *p4d;
386c2febafcSKirill A. Shutemov 	unsigned long next;
387c2febafcSKirill A. Shutemov 
388c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, addr);
389c2febafcSKirill A. Shutemov 	do {
390c2febafcSKirill A. Shutemov 		next = p4d_addr_end(addr, end);
3912ba3e694SJoerg Roedel 
392c8db8c26SLi kunyu 		p4d_clear_huge(p4d);
393c8db8c26SLi kunyu 		if (p4d_bad(*p4d))
3942ba3e694SJoerg Roedel 			*mask |= PGTBL_P4D_MODIFIED;
3952ba3e694SJoerg Roedel 
396c2febafcSKirill A. Shutemov 		if (p4d_none_or_clear_bad(p4d))
397c2febafcSKirill A. Shutemov 			continue;
3982ba3e694SJoerg Roedel 		vunmap_pud_range(p4d, addr, next, mask);
399c2febafcSKirill A. Shutemov 	} while (p4d++, addr = next, addr != end);
400c2febafcSKirill A. Shutemov }
401c2febafcSKirill A. Shutemov 
4024ad0ae8cSNicholas Piggin /*
4034ad0ae8cSNicholas Piggin  * vunmap_range_noflush is similar to vunmap_range, but does not
4044ad0ae8cSNicholas Piggin  * flush caches or TLBs.
405b521c43fSChristoph Hellwig  *
4064ad0ae8cSNicholas Piggin  * The caller is responsible for calling flush_cache_vunmap() before calling
4074ad0ae8cSNicholas Piggin  * this function, and flush_tlb_kernel_range() after it has returned
4084ad0ae8cSNicholas Piggin  * successfully (and before the addresses are expected to cause a page fault
4094ad0ae8cSNicholas Piggin  * or be re-mapped for something else, if TLB flushes are being delayed or
4104ad0ae8cSNicholas Piggin  * coalesced).
411b521c43fSChristoph Hellwig  *
4124ad0ae8cSNicholas Piggin  * This is an internal function only. Do not use outside mm/.
413b521c43fSChristoph Hellwig  */
414b073d7f8SAlexander Potapenko void __vunmap_range_noflush(unsigned long start, unsigned long end)
4151da177e4SLinus Torvalds {
4161da177e4SLinus Torvalds 	unsigned long next;
417b521c43fSChristoph Hellwig 	pgd_t *pgd;
4182ba3e694SJoerg Roedel 	unsigned long addr = start;
4192ba3e694SJoerg Roedel 	pgtbl_mod_mask mask = 0;
4201da177e4SLinus Torvalds 
4211da177e4SLinus Torvalds 	BUG_ON(addr >= end);
4221da177e4SLinus Torvalds 	pgd = pgd_offset_k(addr);
4231da177e4SLinus Torvalds 	do {
4241da177e4SLinus Torvalds 		next = pgd_addr_end(addr, end);
4252ba3e694SJoerg Roedel 		if (pgd_bad(*pgd))
4262ba3e694SJoerg Roedel 			mask |= PGTBL_PGD_MODIFIED;
4271da177e4SLinus Torvalds 		if (pgd_none_or_clear_bad(pgd))
4281da177e4SLinus Torvalds 			continue;
4292ba3e694SJoerg Roedel 		vunmap_p4d_range(pgd, addr, next, &mask);
4301da177e4SLinus Torvalds 	} while (pgd++, addr = next, addr != end);
4312ba3e694SJoerg Roedel 
4322ba3e694SJoerg Roedel 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
4332ba3e694SJoerg Roedel 		arch_sync_kernel_mappings(start, end);
4341da177e4SLinus Torvalds }
4351da177e4SLinus Torvalds 
436b073d7f8SAlexander Potapenko void vunmap_range_noflush(unsigned long start, unsigned long end)
437b073d7f8SAlexander Potapenko {
438b073d7f8SAlexander Potapenko 	kmsan_vunmap_range_noflush(start, end);
439b073d7f8SAlexander Potapenko 	__vunmap_range_noflush(start, end);
440b073d7f8SAlexander Potapenko }
441b073d7f8SAlexander Potapenko 
4424ad0ae8cSNicholas Piggin /**
4434ad0ae8cSNicholas Piggin  * vunmap_range - unmap kernel virtual addresses
4444ad0ae8cSNicholas Piggin  * @addr: start of the VM area to unmap
4454ad0ae8cSNicholas Piggin  * @end: end of the VM area to unmap (non-inclusive)
4464ad0ae8cSNicholas Piggin  *
4474ad0ae8cSNicholas Piggin  * Clears any present PTEs in the virtual address range, flushes TLBs and
4484ad0ae8cSNicholas Piggin  * caches. Any subsequent access to the address before it has been re-mapped
4494ad0ae8cSNicholas Piggin  * is a kernel bug.
4504ad0ae8cSNicholas Piggin  */
4514ad0ae8cSNicholas Piggin void vunmap_range(unsigned long addr, unsigned long end)
4524ad0ae8cSNicholas Piggin {
4534ad0ae8cSNicholas Piggin 	flush_cache_vunmap(addr, end);
4544ad0ae8cSNicholas Piggin 	vunmap_range_noflush(addr, end);
4554ad0ae8cSNicholas Piggin 	flush_tlb_kernel_range(addr, end);
4564ad0ae8cSNicholas Piggin }
4574ad0ae8cSNicholas Piggin 
4580a264884SNicholas Piggin static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
4592ba3e694SJoerg Roedel 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
4602ba3e694SJoerg Roedel 		pgtbl_mod_mask *mask)
4611da177e4SLinus Torvalds {
4621da177e4SLinus Torvalds 	pte_t *pte;
4631da177e4SLinus Torvalds 
464db64fe02SNick Piggin 	/*
465db64fe02SNick Piggin 	 * nr is a running index into the pages array; it lets higher-level
466db64fe02SNick Piggin 	 * callers keep track of how many pages have been mapped so far.
467db64fe02SNick Piggin 	 */
468db64fe02SNick Piggin 
4692ba3e694SJoerg Roedel 	pte = pte_alloc_kernel_track(pmd, addr, mask);
4701da177e4SLinus Torvalds 	if (!pte)
4711da177e4SLinus Torvalds 		return -ENOMEM;
4721da177e4SLinus Torvalds 	do {
473db64fe02SNick Piggin 		struct page *page = pages[*nr];
474db64fe02SNick Piggin 
475c33c7948SRyan Roberts 		if (WARN_ON(!pte_none(ptep_get(pte))))
476db64fe02SNick Piggin 			return -EBUSY;
477db64fe02SNick Piggin 		if (WARN_ON(!page))
4781da177e4SLinus Torvalds 			return -ENOMEM;
4794fcdcc12SYury Norov 		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
4804fcdcc12SYury Norov 			return -EINVAL;
4814fcdcc12SYury Norov 
4821da177e4SLinus Torvalds 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
483db64fe02SNick Piggin 		(*nr)++;
4841da177e4SLinus Torvalds 	} while (pte++, addr += PAGE_SIZE, addr != end);
4852ba3e694SJoerg Roedel 	*mask |= PGTBL_PTE_MODIFIED;
4861da177e4SLinus Torvalds 	return 0;
4871da177e4SLinus Torvalds }
4881da177e4SLinus Torvalds 
4890a264884SNicholas Piggin static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
4902ba3e694SJoerg Roedel 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
4912ba3e694SJoerg Roedel 		pgtbl_mod_mask *mask)
4921da177e4SLinus Torvalds {
4931da177e4SLinus Torvalds 	pmd_t *pmd;
4941da177e4SLinus Torvalds 	unsigned long next;
4951da177e4SLinus Torvalds 
4962ba3e694SJoerg Roedel 	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
4971da177e4SLinus Torvalds 	if (!pmd)
4981da177e4SLinus Torvalds 		return -ENOMEM;
4991da177e4SLinus Torvalds 	do {
5001da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
5010a264884SNicholas Piggin 		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
5021da177e4SLinus Torvalds 			return -ENOMEM;
5031da177e4SLinus Torvalds 	} while (pmd++, addr = next, addr != end);
5041da177e4SLinus Torvalds 	return 0;
5051da177e4SLinus Torvalds }
5061da177e4SLinus Torvalds 
5070a264884SNicholas Piggin static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
5082ba3e694SJoerg Roedel 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
5092ba3e694SJoerg Roedel 		pgtbl_mod_mask *mask)
5101da177e4SLinus Torvalds {
5111da177e4SLinus Torvalds 	pud_t *pud;
5121da177e4SLinus Torvalds 	unsigned long next;
5131da177e4SLinus Torvalds 
5142ba3e694SJoerg Roedel 	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
5151da177e4SLinus Torvalds 	if (!pud)
5161da177e4SLinus Torvalds 		return -ENOMEM;
5171da177e4SLinus Torvalds 	do {
5181da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
5190a264884SNicholas Piggin 		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
5201da177e4SLinus Torvalds 			return -ENOMEM;
5211da177e4SLinus Torvalds 	} while (pud++, addr = next, addr != end);
5221da177e4SLinus Torvalds 	return 0;
5231da177e4SLinus Torvalds }
5241da177e4SLinus Torvalds 
5250a264884SNicholas Piggin static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
5262ba3e694SJoerg Roedel 		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
5272ba3e694SJoerg Roedel 		pgtbl_mod_mask *mask)
528c2febafcSKirill A. Shutemov {
529c2febafcSKirill A. Shutemov 	p4d_t *p4d;
530c2febafcSKirill A. Shutemov 	unsigned long next;
531c2febafcSKirill A. Shutemov 
5322ba3e694SJoerg Roedel 	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
533c2febafcSKirill A. Shutemov 	if (!p4d)
534c2febafcSKirill A. Shutemov 		return -ENOMEM;
535c2febafcSKirill A. Shutemov 	do {
536c2febafcSKirill A. Shutemov 		next = p4d_addr_end(addr, end);
5370a264884SNicholas Piggin 		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
538c2febafcSKirill A. Shutemov 			return -ENOMEM;
539c2febafcSKirill A. Shutemov 	} while (p4d++, addr = next, addr != end);
540c2febafcSKirill A. Shutemov 	return 0;
541c2febafcSKirill A. Shutemov }
542c2febafcSKirill A. Shutemov 
543121e6f32SNicholas Piggin static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
544121e6f32SNicholas Piggin 		pgprot_t prot, struct page **pages)
545121e6f32SNicholas Piggin {
546121e6f32SNicholas Piggin 	unsigned long start = addr;
547121e6f32SNicholas Piggin 	pgd_t *pgd;
548121e6f32SNicholas Piggin 	unsigned long next;
549121e6f32SNicholas Piggin 	int err = 0;
550121e6f32SNicholas Piggin 	int nr = 0;
551121e6f32SNicholas Piggin 	pgtbl_mod_mask mask = 0;
552121e6f32SNicholas Piggin 
553121e6f32SNicholas Piggin 	BUG_ON(addr >= end);
554121e6f32SNicholas Piggin 	pgd = pgd_offset_k(addr);
555121e6f32SNicholas Piggin 	do {
556121e6f32SNicholas Piggin 		next = pgd_addr_end(addr, end);
557121e6f32SNicholas Piggin 		if (pgd_bad(*pgd))
558121e6f32SNicholas Piggin 			mask |= PGTBL_PGD_MODIFIED;
559121e6f32SNicholas Piggin 		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
560121e6f32SNicholas Piggin 		if (err)
561121e6f32SNicholas Piggin 			return err;
562121e6f32SNicholas Piggin 	} while (pgd++, addr = next, addr != end);
563121e6f32SNicholas Piggin 
564121e6f32SNicholas Piggin 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
565121e6f32SNicholas Piggin 		arch_sync_kernel_mappings(start, end);
566121e6f32SNicholas Piggin 
567121e6f32SNicholas Piggin 	return 0;
568121e6f32SNicholas Piggin }
569121e6f32SNicholas Piggin 
570b67177ecSNicholas Piggin /*
571b67177ecSNicholas Piggin  * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
572b67177ecSNicholas Piggin  * flush caches.
573b67177ecSNicholas Piggin  *
574b67177ecSNicholas Piggin  * The caller is responsible for calling flush_cache_vmap() after this
575b67177ecSNicholas Piggin  * function returns successfully and before the addresses are accessed.
576b67177ecSNicholas Piggin  *
577b67177ecSNicholas Piggin  * This is an internal function only. Do not use outside mm/.
578b67177ecSNicholas Piggin  */
579b073d7f8SAlexander Potapenko int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
580121e6f32SNicholas Piggin 		pgprot_t prot, struct page **pages, unsigned int page_shift)
581121e6f32SNicholas Piggin {
582121e6f32SNicholas Piggin 	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
583121e6f32SNicholas Piggin 
584121e6f32SNicholas Piggin 	WARN_ON(page_shift < PAGE_SHIFT);
585121e6f32SNicholas Piggin 
586121e6f32SNicholas Piggin 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
587121e6f32SNicholas Piggin 			page_shift == PAGE_SHIFT)
588121e6f32SNicholas Piggin 		return vmap_small_pages_range_noflush(addr, end, prot, pages);
589121e6f32SNicholas Piggin 
590121e6f32SNicholas Piggin 	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
591121e6f32SNicholas Piggin 		int err;
592121e6f32SNicholas Piggin 
593121e6f32SNicholas Piggin 		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
59408262ac5SMatthew Wilcox 					page_to_phys(pages[i]), prot,
595121e6f32SNicholas Piggin 					page_shift);
596121e6f32SNicholas Piggin 		if (err)
597121e6f32SNicholas Piggin 			return err;
598121e6f32SNicholas Piggin 
599121e6f32SNicholas Piggin 		addr += 1UL << page_shift;
600121e6f32SNicholas Piggin 	}
601121e6f32SNicholas Piggin 
602121e6f32SNicholas Piggin 	return 0;
603121e6f32SNicholas Piggin }
604121e6f32SNicholas Piggin 
605b073d7f8SAlexander Potapenko int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
606b073d7f8SAlexander Potapenko 		pgprot_t prot, struct page **pages, unsigned int page_shift)
607b073d7f8SAlexander Potapenko {
60847ebd031SAlexander Potapenko 	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
60947ebd031SAlexander Potapenko 						 page_shift);
61047ebd031SAlexander Potapenko 
61147ebd031SAlexander Potapenko 	if (ret)
61247ebd031SAlexander Potapenko 		return ret;
613b073d7f8SAlexander Potapenko 	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
614b073d7f8SAlexander Potapenko }
615b073d7f8SAlexander Potapenko 
616b67177ecSNicholas Piggin /**
617b67177ecSNicholas Piggin  * vmap_pages_range - map pages to a kernel virtual address
618b67177ecSNicholas Piggin  * @addr: start of the VM area to map
619b67177ecSNicholas Piggin  * @end: end of the VM area to map (non-inclusive)
620b67177ecSNicholas Piggin  * @prot: page protection flags to use
621b67177ecSNicholas Piggin  * @pages: pages to map (always PAGE_SIZE pages)
622b67177ecSNicholas Piggin  * @page_shift: maximum shift that the pages may be mapped with; @pages must
623b67177ecSNicholas Piggin  * be aligned and contiguous up to at least this shift.
624b67177ecSNicholas Piggin  *
625b67177ecSNicholas Piggin  * RETURNS:
626b67177ecSNicholas Piggin  * 0 on success, -errno on failure.
627b67177ecSNicholas Piggin  */
628121e6f32SNicholas Piggin static int vmap_pages_range(unsigned long addr, unsigned long end,
629121e6f32SNicholas Piggin 		pgprot_t prot, struct page **pages, unsigned int page_shift)
630121e6f32SNicholas Piggin {
631121e6f32SNicholas Piggin 	int err;
632121e6f32SNicholas Piggin 
633121e6f32SNicholas Piggin 	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
634121e6f32SNicholas Piggin 	flush_cache_vmap(addr, end);
635121e6f32SNicholas Piggin 	return err;
636121e6f32SNicholas Piggin }
637121e6f32SNicholas Piggin 
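/*
 * A minimal sketch tying the map and unmap paths together (illustrative;
 * example_map_unmap() is a hypothetical helper and error handling is
 * trimmed): order-0 pages are mapped into a fresh vmap area with small
 * (PAGE_SHIFT) granularity, used, and then unmapped again.
 */
static __maybe_unused void example_map_unmap(struct page **pages,
					     unsigned int nr_pages)
{
	unsigned long size = (unsigned long)nr_pages << PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long addr;

	area = get_vm_area(size, VM_MAP);
	if (!area)
		return;
	addr = (unsigned long)area->addr;

	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, PAGE_SHIFT)) {
		free_vm_area(area);
		return;
	}

	/* ... access the mapping at (void *)addr ... */

	vunmap_range(addr, addr + size);
	free_vm_area(area);	/* releases the now-unmapped virtual range */
}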
63881ac3ad9SKAMEZAWA Hiroyuki int is_vmalloc_or_module_addr(const void *x)
63973bdf0a6SLinus Torvalds {
64073bdf0a6SLinus Torvalds 	/*
641ab4f2ee1SRussell King 	 * ARM, x86-64 and sparc64 put modules in a special place,
64273bdf0a6SLinus Torvalds 	 * and fall back on vmalloc() if that fails. Others
64373bdf0a6SLinus Torvalds 	 * just put them in the vmalloc space.
64473bdf0a6SLinus Torvalds 	 */
64573bdf0a6SLinus Torvalds #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
6464aff1dc4SAndrey Konovalov 	unsigned long addr = (unsigned long)kasan_reset_tag(x);
64773bdf0a6SLinus Torvalds 	if (addr >= MODULES_VADDR && addr < MODULES_END)
64873bdf0a6SLinus Torvalds 		return 1;
64973bdf0a6SLinus Torvalds #endif
65073bdf0a6SLinus Torvalds 	return is_vmalloc_addr(x);
65173bdf0a6SLinus Torvalds }
65201858469SDavid Howells EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);
65373bdf0a6SLinus Torvalds 
65448667e7aSChristoph Lameter /*
655c0eb315aSNicholas Piggin  * Walk a vmap address to the struct page it maps. Huge vmap mappings will
656c0eb315aSNicholas Piggin  * return the tail page that corresponds to the base page address, which
657c0eb315aSNicholas Piggin  * matches small vmap mappings.
65848667e7aSChristoph Lameter  */
659add688fbSmalc struct page *vmalloc_to_page(const void *vmalloc_addr)
66048667e7aSChristoph Lameter {
66148667e7aSChristoph Lameter 	unsigned long addr = (unsigned long) vmalloc_addr;
662add688fbSmalc 	struct page *page = NULL;
66348667e7aSChristoph Lameter 	pgd_t *pgd = pgd_offset_k(addr);
664c2febafcSKirill A. Shutemov 	p4d_t *p4d;
665c2febafcSKirill A. Shutemov 	pud_t *pud;
666c2febafcSKirill A. Shutemov 	pmd_t *pmd;
667c2febafcSKirill A. Shutemov 	pte_t *ptep, pte;
66848667e7aSChristoph Lameter 
6697aa413deSIngo Molnar 	/*
6707aa413deSIngo Molnar 	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
6717aa413deSIngo Molnar 	 * architectures that do not vmalloc module space
6727aa413deSIngo Molnar 	 */
67373bdf0a6SLinus Torvalds 	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
67459ea7463SJiri Slaby 
675c2febafcSKirill A. Shutemov 	if (pgd_none(*pgd))
676c2febafcSKirill A. Shutemov 		return NULL;
677c0eb315aSNicholas Piggin 	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
678c0eb315aSNicholas Piggin 		return NULL; /* XXX: no allowance for huge pgd */
679c0eb315aSNicholas Piggin 	if (WARN_ON_ONCE(pgd_bad(*pgd)))
680c0eb315aSNicholas Piggin 		return NULL;
681c0eb315aSNicholas Piggin 
682c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, addr);
683c2febafcSKirill A. Shutemov 	if (p4d_none(*p4d))
684c2febafcSKirill A. Shutemov 		return NULL;
685c0eb315aSNicholas Piggin 	if (p4d_leaf(*p4d))
686c0eb315aSNicholas Piggin 		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
687c0eb315aSNicholas Piggin 	if (WARN_ON_ONCE(p4d_bad(*p4d)))
688c2febafcSKirill A. Shutemov 		return NULL;
689c0eb315aSNicholas Piggin 
690c0eb315aSNicholas Piggin 	pud = pud_offset(p4d, addr);
691c0eb315aSNicholas Piggin 	if (pud_none(*pud))
692c0eb315aSNicholas Piggin 		return NULL;
693c0eb315aSNicholas Piggin 	if (pud_leaf(*pud))
694c0eb315aSNicholas Piggin 		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
695c0eb315aSNicholas Piggin 	if (WARN_ON_ONCE(pud_bad(*pud)))
696c0eb315aSNicholas Piggin 		return NULL;
697c0eb315aSNicholas Piggin 
698c2febafcSKirill A. Shutemov 	pmd = pmd_offset(pud, addr);
699c0eb315aSNicholas Piggin 	if (pmd_none(*pmd))
700c0eb315aSNicholas Piggin 		return NULL;
701c0eb315aSNicholas Piggin 	if (pmd_leaf(*pmd))
702c0eb315aSNicholas Piggin 		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
703c0eb315aSNicholas Piggin 	if (WARN_ON_ONCE(pmd_bad(*pmd)))
704c2febafcSKirill A. Shutemov 		return NULL;
705db64fe02SNick Piggin 
7060d1c81edSHugh Dickins 	ptep = pte_offset_kernel(pmd, addr);
707c33c7948SRyan Roberts 	pte = ptep_get(ptep);
70848667e7aSChristoph Lameter 	if (pte_present(pte))
709add688fbSmalc 		page = pte_page(pte);
710c0eb315aSNicholas Piggin 
711add688fbSmalc 	return page;
712ece86e22SJianyu Zhan }
713ece86e22SJianyu Zhan EXPORT_SYMBOL(vmalloc_to_page);
714ece86e22SJianyu Zhan 
715add688fbSmalc /*
716add688fbSmalc  * Map a vmalloc()-space virtual address to the physical page frame number.
717add688fbSmalc  */
718add688fbSmalc unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
719add688fbSmalc {
720add688fbSmalc 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
721add688fbSmalc }
722add688fbSmalc EXPORT_SYMBOL(vmalloc_to_pfn);
723add688fbSmalc 
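/*
 * A minimal sketch (illustrative; example_walk_pages() is a hypothetical
 * helper): a vmalloc'ed buffer is virtually contiguous only, so each
 * PAGE_SIZE chunk may live on a different physical page and has to be
 * translated separately, e.g. when building a scatterlist.
 */
static __maybe_unused void example_walk_pages(void *buf, unsigned long size)
{
	unsigned long off;

	for (off = 0; off < size; off += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(buf + off);

		pr_debug("va %px -> pfn %lx\n", buf + off, page_to_pfn(page));
	}
}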
724db64fe02SNick Piggin 
725db64fe02SNick Piggin /*** Global kva allocator ***/
726db64fe02SNick Piggin 
727bb850f4dSUladzislau Rezki (Sony) #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
728a6cf4e0fSUladzislau Rezki (Sony) #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
729bb850f4dSUladzislau Rezki (Sony) 
730db64fe02SNick Piggin 
731db64fe02SNick Piggin static DEFINE_SPINLOCK(vmap_area_lock);
732e36176beSUladzislau Rezki (Sony) static DEFINE_SPINLOCK(free_vmap_area_lock);
733f1c4069eSJoonsoo Kim /* Export for kexec only */
734f1c4069eSJoonsoo Kim LIST_HEAD(vmap_area_list);
73589699605SNick Piggin static struct rb_root vmap_area_root = RB_ROOT;
73668ad4a33SUladzislau Rezki (Sony) static bool vmap_initialized __read_mostly;
73789699605SNick Piggin 
73896e2db45SUladzislau Rezki (Sony) static struct rb_root purge_vmap_area_root = RB_ROOT;
73996e2db45SUladzislau Rezki (Sony) static LIST_HEAD(purge_vmap_area_list);
74096e2db45SUladzislau Rezki (Sony) static DEFINE_SPINLOCK(purge_vmap_area_lock);
74196e2db45SUladzislau Rezki (Sony) 
74268ad4a33SUladzislau Rezki (Sony) /*
74368ad4a33SUladzislau Rezki (Sony)  * This kmem_cache is used for vmap_area objects. Allocating
74468ad4a33SUladzislau Rezki (Sony)  * from a dedicated cache instead of the generic slab paths
74568ad4a33SUladzislau Rezki (Sony)  * makes things faster, especially in the "no edge" splitting
74668ad4a33SUladzislau Rezki (Sony)  * of a free block.
74768ad4a33SUladzislau Rezki (Sony)  */
74868ad4a33SUladzislau Rezki (Sony) static struct kmem_cache *vmap_area_cachep;
74989699605SNick Piggin 
75068ad4a33SUladzislau Rezki (Sony) /*
75168ad4a33SUladzislau Rezki (Sony)  * This linked list is used together with free_vmap_area_root.
75268ad4a33SUladzislau Rezki (Sony)  * It gives O(1) access to prev/next entries to perform fast coalescing.
75368ad4a33SUladzislau Rezki (Sony)  */
75468ad4a33SUladzislau Rezki (Sony) static LIST_HEAD(free_vmap_area_list);
75568ad4a33SUladzislau Rezki (Sony) 
75668ad4a33SUladzislau Rezki (Sony) /*
75768ad4a33SUladzislau Rezki (Sony)  * This augmented red-black tree represents the free vmap space.
75868ad4a33SUladzislau Rezki (Sony)  * All vmap_area objects in this tree are sorted by va->va_start
75968ad4a33SUladzislau Rezki (Sony)  * address. It is used for allocation and merging when a vmap
76068ad4a33SUladzislau Rezki (Sony)  * object is released.
76168ad4a33SUladzislau Rezki (Sony)  *
76268ad4a33SUladzislau Rezki (Sony)  * Each vmap_area node stores the maximum free block size found
76368ad4a33SUladzislau Rezki (Sony)  * in its right and left sub-trees. This makes it possible to
76468ad4a33SUladzislau Rezki (Sony)  * find the lowest-address free area that is large enough.
76568ad4a33SUladzislau Rezki (Sony)  */
76668ad4a33SUladzislau Rezki (Sony) static struct rb_root free_vmap_area_root = RB_ROOT;
76768ad4a33SUladzislau Rezki (Sony) 
76882dd23e8SUladzislau Rezki (Sony) /*
76982dd23e8SUladzislau Rezki (Sony)  * Preload a CPU with one object for the "no edge" split case. The
77082dd23e8SUladzislau Rezki (Sony)  * aim is to avoid allocating from atomic context, so that more
77182dd23e8SUladzislau Rezki (Sony)  * permissive allocation masks can be used.
77282dd23e8SUladzislau Rezki (Sony)  */
77382dd23e8SUladzislau Rezki (Sony) static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
77482dd23e8SUladzislau Rezki (Sony) 
77568ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
77668ad4a33SUladzislau Rezki (Sony) va_size(struct vmap_area *va)
77768ad4a33SUladzislau Rezki (Sony) {
77868ad4a33SUladzislau Rezki (Sony) 	return (va->va_end - va->va_start);
77968ad4a33SUladzislau Rezki (Sony) }
78068ad4a33SUladzislau Rezki (Sony) 
78168ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
78268ad4a33SUladzislau Rezki (Sony) get_subtree_max_size(struct rb_node *node)
78368ad4a33SUladzislau Rezki (Sony) {
78468ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *va;
78568ad4a33SUladzislau Rezki (Sony) 
78668ad4a33SUladzislau Rezki (Sony) 	va = rb_entry_safe(node, struct vmap_area, rb_node);
78768ad4a33SUladzislau Rezki (Sony) 	return va ? va->subtree_max_size : 0;
78868ad4a33SUladzislau Rezki (Sony) }
78968ad4a33SUladzislau Rezki (Sony) 
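/*
 * A minimal sketch of how the augmentation is used (illustrative;
 * example_lowest_match() is a simplified, hypothetical variant of the
 * file's real lookup, ignoring alignment and vstart constraints): whole
 * sub-trees whose subtree_max_size is too small are skipped, steering
 * the search to the lowest-address block that still fits.
 */
static __maybe_unused struct vmap_area *
example_lowest_match(struct rb_root *root, unsigned long size)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct vmap_area *va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= size) {
			/* A fitting block exists at a lower address. */
			node = node->rb_left;
		} else if (va_size(va) >= size) {
			/* Nothing on the left fits; this node is the answer. */
			return va;
		} else {
			node = node->rb_right;
		}
	}

	return NULL;
}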
790315cc066SMichel Lespinasse RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
791315cc066SMichel Lespinasse 	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
79268ad4a33SUladzislau Rezki (Sony) 
79377e50af0SThomas Gleixner static void reclaim_and_purge_vmap_areas(void);
79468ad4a33SUladzislau Rezki (Sony) static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
795690467c8SUladzislau Rezki (Sony) static void drain_vmap_area_work(struct work_struct *work);
796690467c8SUladzislau Rezki (Sony) static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
797db64fe02SNick Piggin 
79897105f0aSRoman Gushchin static atomic_long_t nr_vmalloc_pages;
79997105f0aSRoman Gushchin 
80097105f0aSRoman Gushchin unsigned long vmalloc_nr_pages(void)
80197105f0aSRoman Gushchin {
80297105f0aSRoman Gushchin 	return atomic_long_read(&nr_vmalloc_pages);
80397105f0aSRoman Gushchin }
80497105f0aSRoman Gushchin 
805153090f2SBaoquan He /* Look up the first VA which satisfies addr < va_end, NULL if none. */
806f181234aSChen Wandun static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
807f181234aSChen Wandun {
808f181234aSChen Wandun 	struct vmap_area *va = NULL;
809f181234aSChen Wandun 	struct rb_node *n = vmap_area_root.rb_node;
810f181234aSChen Wandun 
8114aff1dc4SAndrey Konovalov 	addr = (unsigned long)kasan_reset_tag((void *)addr);
8124aff1dc4SAndrey Konovalov 
813f181234aSChen Wandun 	while (n) {
814f181234aSChen Wandun 		struct vmap_area *tmp;
815f181234aSChen Wandun 
816f181234aSChen Wandun 		tmp = rb_entry(n, struct vmap_area, rb_node);
817f181234aSChen Wandun 		if (tmp->va_end > addr) {
818f181234aSChen Wandun 			va = tmp;
819f181234aSChen Wandun 			if (tmp->va_start <= addr)
820f181234aSChen Wandun 				break;
821f181234aSChen Wandun 
822f181234aSChen Wandun 			n = n->rb_left;
823f181234aSChen Wandun 		} else
824f181234aSChen Wandun 			n = n->rb_right;
825f181234aSChen Wandun 	}
826f181234aSChen Wandun 
827f181234aSChen Wandun 	return va;
828f181234aSChen Wandun }
829f181234aSChen Wandun 
830899c6efeSUladzislau Rezki (Sony) static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
8311da177e4SLinus Torvalds {
832899c6efeSUladzislau Rezki (Sony) 	struct rb_node *n = root->rb_node;
833db64fe02SNick Piggin 
8344aff1dc4SAndrey Konovalov 	addr = (unsigned long)kasan_reset_tag((void *)addr);
8354aff1dc4SAndrey Konovalov 
836db64fe02SNick Piggin 	while (n) {
837db64fe02SNick Piggin 		struct vmap_area *va;
838db64fe02SNick Piggin 
839db64fe02SNick Piggin 		va = rb_entry(n, struct vmap_area, rb_node);
840db64fe02SNick Piggin 		if (addr < va->va_start)
841db64fe02SNick Piggin 			n = n->rb_left;
842cef2ac3fSHATAYAMA Daisuke 		else if (addr >= va->va_end)
843db64fe02SNick Piggin 			n = n->rb_right;
844db64fe02SNick Piggin 		else
845db64fe02SNick Piggin 			return va;
846db64fe02SNick Piggin 	}
847db64fe02SNick Piggin 
848db64fe02SNick Piggin 	return NULL;
849db64fe02SNick Piggin }
850db64fe02SNick Piggin 
85168ad4a33SUladzislau Rezki (Sony) /*
85268ad4a33SUladzislau Rezki (Sony)  * This function returns the addresses of the parent node
85368ad4a33SUladzislau Rezki (Sony)  * and of its left or right link for further processing.
8549c801f61SUladzislau Rezki (Sony)  *
8559c801f61SUladzislau Rezki (Sony)  * Otherwise NULL is returned. In that case all further
8569c801f61SUladzislau Rezki (Sony)  * steps of inserting the conflicting, overlapping range
8579c801f61SUladzislau Rezki (Sony)  * have to be declined and treated as a bug.
85868ad4a33SUladzislau Rezki (Sony)  */
85968ad4a33SUladzislau Rezki (Sony) static __always_inline struct rb_node **
86068ad4a33SUladzislau Rezki (Sony) find_va_links(struct vmap_area *va,
86168ad4a33SUladzislau Rezki (Sony) 	struct rb_root *root, struct rb_node *from,
86268ad4a33SUladzislau Rezki (Sony) 	struct rb_node **parent)
863db64fe02SNick Piggin {
864170168d0SNamhyung Kim 	struct vmap_area *tmp_va;
86568ad4a33SUladzislau Rezki (Sony) 	struct rb_node **link;
866db64fe02SNick Piggin 
86768ad4a33SUladzislau Rezki (Sony) 	if (root) {
86868ad4a33SUladzislau Rezki (Sony) 		link = &root->rb_node;
86968ad4a33SUladzislau Rezki (Sony) 		if (unlikely(!*link)) {
87068ad4a33SUladzislau Rezki (Sony) 			*parent = NULL;
87168ad4a33SUladzislau Rezki (Sony) 			return link;
87268ad4a33SUladzislau Rezki (Sony) 		}
87368ad4a33SUladzislau Rezki (Sony) 	} else {
87468ad4a33SUladzislau Rezki (Sony) 		link = &from;
87568ad4a33SUladzislau Rezki (Sony) 	}
87668ad4a33SUladzislau Rezki (Sony) 
87768ad4a33SUladzislau Rezki (Sony) 	/*
87868ad4a33SUladzislau Rezki (Sony) 	 * Go to the bottom of the tree. When we hit the last point
87968ad4a33SUladzislau Rezki (Sony) 	 * we end up with the parent rb_node and the correct direction,
88068ad4a33SUladzislau Rezki (Sony) 	 * called "link" here, to which the new va->rb_node is attached.
88168ad4a33SUladzislau Rezki (Sony) 	 */
88268ad4a33SUladzislau Rezki (Sony) 	do {
88368ad4a33SUladzislau Rezki (Sony) 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
88468ad4a33SUladzislau Rezki (Sony) 
88568ad4a33SUladzislau Rezki (Sony) 		/*
88668ad4a33SUladzislau Rezki (Sony) 		 * During the traversal we also do some sanity checks.
88768ad4a33SUladzislau Rezki (Sony) 		 * Trigger a warning if there are partial (left/right)
88868ad4a33SUladzislau Rezki (Sony) 		 * or full overlaps.
88968ad4a33SUladzislau Rezki (Sony) 		 */
890753df96bSBaoquan He 		if (va->va_end <= tmp_va->va_start)
89168ad4a33SUladzislau Rezki (Sony) 			link = &(*link)->rb_left;
892753df96bSBaoquan He 		else if (va->va_start >= tmp_va->va_end)
89368ad4a33SUladzislau Rezki (Sony) 			link = &(*link)->rb_right;
8949c801f61SUladzislau Rezki (Sony) 		else {
8959c801f61SUladzislau Rezki (Sony) 			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
8969c801f61SUladzislau Rezki (Sony) 				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
8979c801f61SUladzislau Rezki (Sony) 
8989c801f61SUladzislau Rezki (Sony) 			return NULL;
8999c801f61SUladzislau Rezki (Sony) 		}
90068ad4a33SUladzislau Rezki (Sony) 	} while (*link);
90168ad4a33SUladzislau Rezki (Sony) 
90268ad4a33SUladzislau Rezki (Sony) 	*parent = &tmp_va->rb_node;
90368ad4a33SUladzislau Rezki (Sony) 	return link;
904db64fe02SNick Piggin }
905db64fe02SNick Piggin 
90668ad4a33SUladzislau Rezki (Sony) static __always_inline struct list_head *
90768ad4a33SUladzislau Rezki (Sony) get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
90868ad4a33SUladzislau Rezki (Sony) {
90968ad4a33SUladzislau Rezki (Sony) 	struct list_head *list;
910db64fe02SNick Piggin 
91168ad4a33SUladzislau Rezki (Sony) 	if (unlikely(!parent))
91268ad4a33SUladzislau Rezki (Sony) 		/*
91368ad4a33SUladzislau Rezki (Sony) 		 * The red-black tree where we try to find VA neighbors
91468ad4a33SUladzislau Rezki (Sony) 		 * before merging or inserting is empty, i.e. there
91568ad4a33SUladzislau Rezki (Sony) 		 * is no free vmap space. Normally this does not
91668ad4a33SUladzislau Rezki (Sony) 		 * happen but we handle the case anyway.
91768ad4a33SUladzislau Rezki (Sony) 		 */
91868ad4a33SUladzislau Rezki (Sony) 		return NULL;
91968ad4a33SUladzislau Rezki (Sony) 
92068ad4a33SUladzislau Rezki (Sony) 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
92168ad4a33SUladzislau Rezki (Sony) 	return (&parent->rb_right == link ? list->next : list);
922db64fe02SNick Piggin }
923db64fe02SNick Piggin 
92468ad4a33SUladzislau Rezki (Sony) static __always_inline void
9258eb510dbSUladzislau Rezki (Sony) __link_va(struct vmap_area *va, struct rb_root *root,
9268eb510dbSUladzislau Rezki (Sony) 	struct rb_node *parent, struct rb_node **link,
9278eb510dbSUladzislau Rezki (Sony) 	struct list_head *head, bool augment)
92868ad4a33SUladzislau Rezki (Sony) {
92968ad4a33SUladzislau Rezki (Sony) 	/*
93068ad4a33SUladzislau Rezki (Sony) 	 * The VA is not yet in the list, but we can
93168ad4a33SUladzislau Rezki (Sony) 	 * identify its future previous list_head node.
93268ad4a33SUladzislau Rezki (Sony) 	 */
93368ad4a33SUladzislau Rezki (Sony) 	if (likely(parent)) {
93468ad4a33SUladzislau Rezki (Sony) 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
93568ad4a33SUladzislau Rezki (Sony) 		if (&parent->rb_right != link)
93668ad4a33SUladzislau Rezki (Sony) 			head = head->prev;
93768ad4a33SUladzislau Rezki (Sony) 	}
938db64fe02SNick Piggin 
93968ad4a33SUladzislau Rezki (Sony) 	/* Insert to the rb-tree */
94068ad4a33SUladzislau Rezki (Sony) 	rb_link_node(&va->rb_node, parent, link);
9418eb510dbSUladzislau Rezki (Sony) 	if (augment) {
94268ad4a33SUladzislau Rezki (Sony) 		/*
94368ad4a33SUladzislau Rezki (Sony) 		 * Just perform a simple insertion into the tree. We do not
94468ad4a33SUladzislau Rezki (Sony) 		 * set va->subtree_max_size to its current size before calling
94568ad4a33SUladzislau Rezki (Sony) 		 * rb_insert_augmented(). This is because we populate the tree
946153090f2SBaoquan He 		 * from the bottom up to the parent levels when the node _is_
94768ad4a33SUladzislau Rezki (Sony) 		 * in the tree.
94868ad4a33SUladzislau Rezki (Sony) 		 *
94968ad4a33SUladzislau Rezki (Sony) 		 * Therefore we set subtree_max_size to zero after insertion,
95068ad4a33SUladzislau Rezki (Sony) 		 * to let __augment_tree_propagate_from() put everything in
95168ad4a33SUladzislau Rezki (Sony) 		 * the correct order later on.
95268ad4a33SUladzislau Rezki (Sony) 		 */
95368ad4a33SUladzislau Rezki (Sony) 		rb_insert_augmented(&va->rb_node,
95468ad4a33SUladzislau Rezki (Sony) 			root, &free_vmap_area_rb_augment_cb);
95568ad4a33SUladzislau Rezki (Sony) 		va->subtree_max_size = 0;
95668ad4a33SUladzislau Rezki (Sony) 	} else {
95768ad4a33SUladzislau Rezki (Sony) 		rb_insert_color(&va->rb_node, root);
95868ad4a33SUladzislau Rezki (Sony) 	}
95968ad4a33SUladzislau Rezki (Sony) 
96068ad4a33SUladzislau Rezki (Sony) 	/* Address-sort this list */
96168ad4a33SUladzislau Rezki (Sony) 	list_add(&va->list, head);
96268ad4a33SUladzislau Rezki (Sony) }
96368ad4a33SUladzislau Rezki (Sony) 
96468ad4a33SUladzislau Rezki (Sony) static __always_inline void
9658eb510dbSUladzislau Rezki (Sony) link_va(struct vmap_area *va, struct rb_root *root,
9668eb510dbSUladzislau Rezki (Sony) 	struct rb_node *parent, struct rb_node **link,
9678eb510dbSUladzislau Rezki (Sony) 	struct list_head *head)
9688eb510dbSUladzislau Rezki (Sony) {
9698eb510dbSUladzislau Rezki (Sony) 	__link_va(va, root, parent, link, head, false);
9708eb510dbSUladzislau Rezki (Sony) }
9718eb510dbSUladzislau Rezki (Sony) 
9728eb510dbSUladzislau Rezki (Sony) static __always_inline void
9738eb510dbSUladzislau Rezki (Sony) link_va_augment(struct vmap_area *va, struct rb_root *root,
9748eb510dbSUladzislau Rezki (Sony) 	struct rb_node *parent, struct rb_node **link,
9758eb510dbSUladzislau Rezki (Sony) 	struct list_head *head)
9768eb510dbSUladzislau Rezki (Sony) {
9778eb510dbSUladzislau Rezki (Sony) 	__link_va(va, root, parent, link, head, true);
9788eb510dbSUladzislau Rezki (Sony) }
9798eb510dbSUladzislau Rezki (Sony) 
9808eb510dbSUladzislau Rezki (Sony) static __always_inline void
9818eb510dbSUladzislau Rezki (Sony) __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
98268ad4a33SUladzislau Rezki (Sony) {
983460e42d1SUladzislau Rezki (Sony) 	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
984460e42d1SUladzislau Rezki (Sony) 		return;
985460e42d1SUladzislau Rezki (Sony) 
9868eb510dbSUladzislau Rezki (Sony) 	if (augment)
98768ad4a33SUladzislau Rezki (Sony) 		rb_erase_augmented(&va->rb_node,
98868ad4a33SUladzislau Rezki (Sony) 			root, &free_vmap_area_rb_augment_cb);
98968ad4a33SUladzislau Rezki (Sony) 	else
99068ad4a33SUladzislau Rezki (Sony) 		rb_erase(&va->rb_node, root);
99168ad4a33SUladzislau Rezki (Sony) 
9925d7a7c54SUladzislau Rezki (Sony) 	list_del_init(&va->list);
99368ad4a33SUladzislau Rezki (Sony) 	RB_CLEAR_NODE(&va->rb_node);
99468ad4a33SUladzislau Rezki (Sony) }
99568ad4a33SUladzislau Rezki (Sony) 
9968eb510dbSUladzislau Rezki (Sony) static __always_inline void
9978eb510dbSUladzislau Rezki (Sony) unlink_va(struct vmap_area *va, struct rb_root *root)
9988eb510dbSUladzislau Rezki (Sony) {
9998eb510dbSUladzislau Rezki (Sony) 	__unlink_va(va, root, false);
10008eb510dbSUladzislau Rezki (Sony) }
10018eb510dbSUladzislau Rezki (Sony) 
10028eb510dbSUladzislau Rezki (Sony) static __always_inline void
10038eb510dbSUladzislau Rezki (Sony) unlink_va_augment(struct vmap_area *va, struct rb_root *root)
10048eb510dbSUladzislau Rezki (Sony) {
10058eb510dbSUladzislau Rezki (Sony) 	__unlink_va(va, root, true);
10068eb510dbSUladzislau Rezki (Sony) }
10078eb510dbSUladzislau Rezki (Sony) 
1008bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK
1009c3385e84SJiapeng Chong /*
1010c3385e84SJiapeng Chong  * Gets called when removing the node and rotating.
1011c3385e84SJiapeng Chong  */
1012c3385e84SJiapeng Chong static __always_inline unsigned long
1013c3385e84SJiapeng Chong compute_subtree_max_size(struct vmap_area *va)
1014c3385e84SJiapeng Chong {
1015c3385e84SJiapeng Chong 	return max3(va_size(va),
1016c3385e84SJiapeng Chong 		get_subtree_max_size(va->rb_node.rb_left),
1017c3385e84SJiapeng Chong 		get_subtree_max_size(va->rb_node.rb_right));
1018c3385e84SJiapeng Chong }
1019c3385e84SJiapeng Chong 
1020bb850f4dSUladzislau Rezki (Sony) static void
1021da27c9edSUladzislau Rezki (Sony) augment_tree_propagate_check(void)
1022bb850f4dSUladzislau Rezki (Sony) {
1023bb850f4dSUladzislau Rezki (Sony) 	struct vmap_area *va;
1024da27c9edSUladzislau Rezki (Sony) 	unsigned long computed_size;
1025bb850f4dSUladzislau Rezki (Sony) 
1026da27c9edSUladzislau Rezki (Sony) 	list_for_each_entry(va, &free_vmap_area_list, list) {
1027da27c9edSUladzislau Rezki (Sony) 		computed_size = compute_subtree_max_size(va);
1028da27c9edSUladzislau Rezki (Sony) 		if (computed_size != va->subtree_max_size)
1029bb850f4dSUladzislau Rezki (Sony) 			pr_emerg("tree is corrupted: %lu, %lu\n",
1030bb850f4dSUladzislau Rezki (Sony) 				va_size(va), va->subtree_max_size);
1031bb850f4dSUladzislau Rezki (Sony) 	}
1032bb850f4dSUladzislau Rezki (Sony) }
1033bb850f4dSUladzislau Rezki (Sony) #endif
1034bb850f4dSUladzislau Rezki (Sony) 
103568ad4a33SUladzislau Rezki (Sony) /*
103668ad4a33SUladzislau Rezki (Sony)  * This function populates subtree_max_size from the bottom to the
103768ad4a33SUladzislau Rezki (Sony)  * upper levels, starting from the VA point. The propagation must be
103868ad4a33SUladzislau Rezki (Sony)  * done when the VA size is modified by changing its va_start/va_end,
103968ad4a33SUladzislau Rezki (Sony)  * or when a new VA is inserted into the tree.
104068ad4a33SUladzislau Rezki (Sony)  *
104168ad4a33SUladzislau Rezki (Sony)  * It means that __augment_tree_propagate_from() must be called:
104268ad4a33SUladzislau Rezki (Sony)  * - After VA has been inserted to the tree(free path);
104368ad4a33SUladzislau Rezki (Sony)  * - After VA has been shrunk(allocation path);
104468ad4a33SUladzislau Rezki (Sony)  * - After VA has been increased(merging path).
104568ad4a33SUladzislau Rezki (Sony)  *
104668ad4a33SUladzislau Rezki (Sony)  * Please note that, it does not mean that upper parent nodes
104768ad4a33SUladzislau Rezki (Sony)  * and their subtree_max_size are recalculated all the time up
104868ad4a33SUladzislau Rezki (Sony)  * to the root node.
104968ad4a33SUladzislau Rezki (Sony)  *
105068ad4a33SUladzislau Rezki (Sony)  *       4--8
105168ad4a33SUladzislau Rezki (Sony)  *        /\
105268ad4a33SUladzislau Rezki (Sony)  *       /  \
105368ad4a33SUladzislau Rezki (Sony)  *      /    \
105468ad4a33SUladzislau Rezki (Sony)  *    2--2  8--8
105568ad4a33SUladzislau Rezki (Sony)  *
105668ad4a33SUladzislau Rezki (Sony)  * For example if we modify the node 4, shrinking it to 2, then
105768ad4a33SUladzislau Rezki (Sony)  * no any modification is required. If we shrink the node 2 to 1
105868ad4a33SUladzislau Rezki (Sony)  * its subtree_max_size is updated only, and set to 1. If we shrink
105968ad4a33SUladzislau Rezki (Sony)  * the node 8 to 6, then its subtree_max_size is set to 6 and parent
106068ad4a33SUladzislau Rezki (Sony)  * node becomes 4--6.
106168ad4a33SUladzislau Rezki (Sony)  */
106268ad4a33SUladzislau Rezki (Sony) static __always_inline void
106368ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(struct vmap_area *va)
106468ad4a33SUladzislau Rezki (Sony) {
106568ad4a33SUladzislau Rezki (Sony) 	/*
106615ae144fSUladzislau Rezki (Sony) 	 * Populate the tree from the bottom towards the root until
106715ae144fSUladzislau Rezki (Sony) 	 * the calculated maximum available size of a checked node
106815ae144fSUladzislau Rezki (Sony) 	 * is equal to its current one.
106968ad4a33SUladzislau Rezki (Sony) 	 */
107015ae144fSUladzislau Rezki (Sony) 	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1071bb850f4dSUladzislau Rezki (Sony) 
1072bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK
1073da27c9edSUladzislau Rezki (Sony) 	augment_tree_propagate_check();
1074bb850f4dSUladzislau Rezki (Sony) #endif
107568ad4a33SUladzislau Rezki (Sony) }
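/*
 * Worked example (editorial illustration, not part of the original
 * file): take the 4--8 tree from the comment above and shrink the
 * left leaf 2--2 to size 1. The leaf's subtree_max_size becomes 1,
 * but the propagation stops at the root, because
 * max3(4, 1, 8) == 8 still matches the root's cached value.
 */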
107668ad4a33SUladzislau Rezki (Sony) 
107768ad4a33SUladzislau Rezki (Sony) static void
107868ad4a33SUladzislau Rezki (Sony) insert_vmap_area(struct vmap_area *va,
107968ad4a33SUladzislau Rezki (Sony) 	struct rb_root *root, struct list_head *head)
108068ad4a33SUladzislau Rezki (Sony) {
108168ad4a33SUladzislau Rezki (Sony) 	struct rb_node **link;
108268ad4a33SUladzislau Rezki (Sony) 	struct rb_node *parent;
108368ad4a33SUladzislau Rezki (Sony) 
108468ad4a33SUladzislau Rezki (Sony) 	link = find_va_links(va, root, NULL, &parent);
10859c801f61SUladzislau Rezki (Sony) 	if (link)
108668ad4a33SUladzislau Rezki (Sony) 		link_va(va, root, parent, link, head);
108768ad4a33SUladzislau Rezki (Sony) }
108868ad4a33SUladzislau Rezki (Sony) 
108968ad4a33SUladzislau Rezki (Sony) static void
109068ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(struct vmap_area *va,
109168ad4a33SUladzislau Rezki (Sony) 	struct rb_node *from, struct rb_root *root,
109268ad4a33SUladzislau Rezki (Sony) 	struct list_head *head)
109368ad4a33SUladzislau Rezki (Sony) {
109468ad4a33SUladzislau Rezki (Sony) 	struct rb_node **link;
109568ad4a33SUladzislau Rezki (Sony) 	struct rb_node *parent;
109668ad4a33SUladzislau Rezki (Sony) 
109768ad4a33SUladzislau Rezki (Sony) 	if (from)
109868ad4a33SUladzislau Rezki (Sony) 		link = find_va_links(va, NULL, from, &parent);
109968ad4a33SUladzislau Rezki (Sony) 	else
110068ad4a33SUladzislau Rezki (Sony) 		link = find_va_links(va, root, NULL, &parent);
110168ad4a33SUladzislau Rezki (Sony) 
11029c801f61SUladzislau Rezki (Sony) 	if (link) {
11038eb510dbSUladzislau Rezki (Sony) 		link_va_augment(va, root, parent, link, head);
110468ad4a33SUladzislau Rezki (Sony) 		augment_tree_propagate_from(va);
110568ad4a33SUladzislau Rezki (Sony) 	}
11069c801f61SUladzislau Rezki (Sony) }
110768ad4a33SUladzislau Rezki (Sony) 
110868ad4a33SUladzislau Rezki (Sony) /*
110968ad4a33SUladzislau Rezki (Sony)  * Merge a de-allocated chunk of VA memory with the previous
111068ad4a33SUladzislau Rezki (Sony)  * and next free blocks. If no coalescing is done, a new
111168ad4a33SUladzislau Rezki (Sony)  * free area is inserted. If the VA has been merged, it is
111268ad4a33SUladzislau Rezki (Sony)  * freed.
11139c801f61SUladzislau Rezki (Sony)  *
11149c801f61SUladzislau Rezki (Sony)  * Please note, it can return NULL in case of overlapping
11159c801f61SUladzislau Rezki (Sony)  * ranges, followed by a WARN() report. Despite this being
11169c801f61SUladzislau Rezki (Sony)  * buggy behaviour, the system can stay alive and keep
11179c801f61SUladzislau Rezki (Sony)  * going.
111868ad4a33SUladzislau Rezki (Sony)  */
11193c5c3cfbSDaniel Axtens static __always_inline struct vmap_area *
11208eb510dbSUladzislau Rezki (Sony) __merge_or_add_vmap_area(struct vmap_area *va,
11218eb510dbSUladzislau Rezki (Sony) 	struct rb_root *root, struct list_head *head, bool augment)
112268ad4a33SUladzislau Rezki (Sony) {
112368ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *sibling;
112468ad4a33SUladzislau Rezki (Sony) 	struct list_head *next;
112568ad4a33SUladzislau Rezki (Sony) 	struct rb_node **link;
112668ad4a33SUladzislau Rezki (Sony) 	struct rb_node *parent;
112768ad4a33SUladzislau Rezki (Sony) 	bool merged = false;
112868ad4a33SUladzislau Rezki (Sony) 
112968ad4a33SUladzislau Rezki (Sony) 	/*
113068ad4a33SUladzislau Rezki (Sony) 	 * Find a place in the tree where VA potentially will be
113168ad4a33SUladzislau Rezki (Sony) 	 * inserted, unless it is merged with its sibling/siblings.
113268ad4a33SUladzislau Rezki (Sony) 	 */
113368ad4a33SUladzislau Rezki (Sony) 	link = find_va_links(va, root, NULL, &parent);
11349c801f61SUladzislau Rezki (Sony) 	if (!link)
11359c801f61SUladzislau Rezki (Sony) 		return NULL;
113668ad4a33SUladzislau Rezki (Sony) 
113768ad4a33SUladzislau Rezki (Sony) 	/*
113868ad4a33SUladzislau Rezki (Sony) 	 * Get next node of VA to check if merging can be done.
113968ad4a33SUladzislau Rezki (Sony) 	 */
114068ad4a33SUladzislau Rezki (Sony) 	next = get_va_next_sibling(parent, link);
114168ad4a33SUladzislau Rezki (Sony) 	if (unlikely(next == NULL))
114268ad4a33SUladzislau Rezki (Sony) 		goto insert;
114368ad4a33SUladzislau Rezki (Sony) 
114468ad4a33SUladzislau Rezki (Sony) 	/*
114568ad4a33SUladzislau Rezki (Sony) 	 * start            end
114668ad4a33SUladzislau Rezki (Sony) 	 * |                |
114768ad4a33SUladzislau Rezki (Sony) 	 * |<------VA------>|<-----Next----->|
114868ad4a33SUladzislau Rezki (Sony) 	 *                  |                |
114968ad4a33SUladzislau Rezki (Sony) 	 *                  start            end
115068ad4a33SUladzislau Rezki (Sony) 	 */
115168ad4a33SUladzislau Rezki (Sony) 	if (next != head) {
115268ad4a33SUladzislau Rezki (Sony) 		sibling = list_entry(next, struct vmap_area, list);
115368ad4a33SUladzislau Rezki (Sony) 		if (sibling->va_start == va->va_end) {
115468ad4a33SUladzislau Rezki (Sony) 			sibling->va_start = va->va_start;
115568ad4a33SUladzislau Rezki (Sony) 
115668ad4a33SUladzislau Rezki (Sony) 			/* Free vmap_area object. */
115768ad4a33SUladzislau Rezki (Sony) 			kmem_cache_free(vmap_area_cachep, va);
115868ad4a33SUladzislau Rezki (Sony) 
115968ad4a33SUladzislau Rezki (Sony) 			/* Point to the new merged area. */
116068ad4a33SUladzislau Rezki (Sony) 			va = sibling;
116168ad4a33SUladzislau Rezki (Sony) 			merged = true;
116268ad4a33SUladzislau Rezki (Sony) 		}
116368ad4a33SUladzislau Rezki (Sony) 	}
116468ad4a33SUladzislau Rezki (Sony) 
116568ad4a33SUladzislau Rezki (Sony) 	/*
116668ad4a33SUladzislau Rezki (Sony) 	 * start            end
116768ad4a33SUladzislau Rezki (Sony) 	 * |                |
116868ad4a33SUladzislau Rezki (Sony) 	 * |<-----Prev----->|<------VA------>|
116968ad4a33SUladzislau Rezki (Sony) 	 *                  |                |
117068ad4a33SUladzislau Rezki (Sony) 	 *                  start            end
117168ad4a33SUladzislau Rezki (Sony) 	 */
117268ad4a33SUladzislau Rezki (Sony) 	if (next->prev != head) {
117368ad4a33SUladzislau Rezki (Sony) 		sibling = list_entry(next->prev, struct vmap_area, list);
117468ad4a33SUladzislau Rezki (Sony) 		if (sibling->va_end == va->va_start) {
11755dd78640SUladzislau Rezki (Sony) 			/*
11765dd78640SUladzislau Rezki (Sony) 			 * If both neighbors are coalesced, it is important
11775dd78640SUladzislau Rezki (Sony) 			 * to unlink the "next" node first, followed by merging
11785dd78640SUladzislau Rezki (Sony) 			 * with the "previous" one. Otherwise the tree might not be
11795dd78640SUladzislau Rezki (Sony) 			 * fully populated if a sibling's augmented value is
11805dd78640SUladzislau Rezki (Sony) 			 * "normalized" because of rotation operations.
11815dd78640SUladzislau Rezki (Sony) 			 */
118254f63d9dSUladzislau Rezki (Sony) 			if (merged)
11838eb510dbSUladzislau Rezki (Sony) 				__unlink_va(va, root, augment);
118468ad4a33SUladzislau Rezki (Sony) 
11855dd78640SUladzislau Rezki (Sony) 			sibling->va_end = va->va_end;
11865dd78640SUladzislau Rezki (Sony) 
118768ad4a33SUladzislau Rezki (Sony) 			/* Free vmap_area object. */
118868ad4a33SUladzislau Rezki (Sony) 			kmem_cache_free(vmap_area_cachep, va);
11893c5c3cfbSDaniel Axtens 
11903c5c3cfbSDaniel Axtens 			/* Point to the new merged area. */
11913c5c3cfbSDaniel Axtens 			va = sibling;
11923c5c3cfbSDaniel Axtens 			merged = true;
119368ad4a33SUladzislau Rezki (Sony) 		}
119468ad4a33SUladzislau Rezki (Sony) 	}
119568ad4a33SUladzislau Rezki (Sony) 
119668ad4a33SUladzislau Rezki (Sony) insert:
11975dd78640SUladzislau Rezki (Sony) 	if (!merged)
11988eb510dbSUladzislau Rezki (Sony) 		__link_va(va, root, parent, link, head, augment);
11993c5c3cfbSDaniel Axtens 
120096e2db45SUladzislau Rezki (Sony) 	return va;
120196e2db45SUladzislau Rezki (Sony) }
120296e2db45SUladzislau Rezki (Sony) 
120396e2db45SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
12048eb510dbSUladzislau Rezki (Sony) merge_or_add_vmap_area(struct vmap_area *va,
12058eb510dbSUladzislau Rezki (Sony) 	struct rb_root *root, struct list_head *head)
12068eb510dbSUladzislau Rezki (Sony) {
12078eb510dbSUladzislau Rezki (Sony) 	return __merge_or_add_vmap_area(va, root, head, false);
12088eb510dbSUladzislau Rezki (Sony) }
12098eb510dbSUladzislau Rezki (Sony) 
12108eb510dbSUladzislau Rezki (Sony) static __always_inline struct vmap_area *
121196e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(struct vmap_area *va,
121296e2db45SUladzislau Rezki (Sony) 	struct rb_root *root, struct list_head *head)
121396e2db45SUladzislau Rezki (Sony) {
12148eb510dbSUladzislau Rezki (Sony) 	va = __merge_or_add_vmap_area(va, root, head, true);
121596e2db45SUladzislau Rezki (Sony) 	if (va)
12165dd78640SUladzislau Rezki (Sony) 		augment_tree_propagate_from(va);
121796e2db45SUladzislau Rezki (Sony) 
12183c5c3cfbSDaniel Axtens 	return va;
121968ad4a33SUladzislau Rezki (Sony) }
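/*
 * Worked example (editorial illustration, not part of the original
 * file): with free areas [0x1000, 0x2000) and [0x3000, 0x4000)
 * already in the tree, merging a freed [0x2000, 0x3000) first
 * extends the "next" sibling down to 0x2000, then unlinks it and
 * grows the "previous" one, leaving a single free node that covers
 * [0x1000, 0x4000).
 */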
122068ad4a33SUladzislau Rezki (Sony) 
122168ad4a33SUladzislau Rezki (Sony) static __always_inline bool
122268ad4a33SUladzislau Rezki (Sony) is_within_this_va(struct vmap_area *va, unsigned long size,
122368ad4a33SUladzislau Rezki (Sony) 	unsigned long align, unsigned long vstart)
122468ad4a33SUladzislau Rezki (Sony) {
122568ad4a33SUladzislau Rezki (Sony) 	unsigned long nva_start_addr;
122668ad4a33SUladzislau Rezki (Sony) 
122768ad4a33SUladzislau Rezki (Sony) 	if (va->va_start > vstart)
122868ad4a33SUladzislau Rezki (Sony) 		nva_start_addr = ALIGN(va->va_start, align);
122968ad4a33SUladzislau Rezki (Sony) 	else
123068ad4a33SUladzislau Rezki (Sony) 		nva_start_addr = ALIGN(vstart, align);
123168ad4a33SUladzislau Rezki (Sony) 
123268ad4a33SUladzislau Rezki (Sony) 	/* Can be overflowed due to big size or alignment. */
123368ad4a33SUladzislau Rezki (Sony) 	if (nva_start_addr + size < nva_start_addr ||
123468ad4a33SUladzislau Rezki (Sony) 			nva_start_addr < vstart)
123568ad4a33SUladzislau Rezki (Sony) 		return false;
123668ad4a33SUladzislau Rezki (Sony) 
123768ad4a33SUladzislau Rezki (Sony) 	return (nva_start_addr + size <= va->va_end);
123868ad4a33SUladzislau Rezki (Sony) }
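/*
 * Worked example (editorial illustration, not part of the original
 * file): on a 64-bit system, vstart == 0xfffffffffffff000 with
 * size == 0x2000 makes "nva_start_addr + size" wrap past zero, so
 * the overflow check above rejects the fit instead of falsely
 * reporting that the request ends inside this VA.
 */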
123968ad4a33SUladzislau Rezki (Sony) 
124068ad4a33SUladzislau Rezki (Sony) /*
124168ad4a33SUladzislau Rezki (Sony)  * Find the first free block (lowest start address) in the tree
124268ad4a33SUladzislau Rezki (Sony)  * that satisfies the request described by the passed parameters.
12439333fe98SUladzislau Rezki  * Please note, with an alignment bigger than PAGE_SIZE, the search
12449333fe98SUladzislau Rezki  * length is adjusted to account for the worst-case alignment
12459333fe98SUladzislau Rezki  * overhead.
124668ad4a33SUladzislau Rezki (Sony)  */
124768ad4a33SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
1248f9863be4SUladzislau Rezki (Sony) find_vmap_lowest_match(struct rb_root *root, unsigned long size,
1249f9863be4SUladzislau Rezki (Sony) 	unsigned long align, unsigned long vstart, bool adjust_search_size)
125068ad4a33SUladzislau Rezki (Sony) {
125168ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *va;
125268ad4a33SUladzislau Rezki (Sony) 	struct rb_node *node;
12539333fe98SUladzislau Rezki 	unsigned long length;
125468ad4a33SUladzislau Rezki (Sony) 
125568ad4a33SUladzislau Rezki (Sony) 	/* Start from the root. */
1256f9863be4SUladzislau Rezki (Sony) 	node = root->rb_node;
125768ad4a33SUladzislau Rezki (Sony) 
12589333fe98SUladzislau Rezki 	/* Adjust the search size for alignment overhead. */
12599333fe98SUladzislau Rezki 	length = adjust_search_size ? size + align - 1 : size;
12609333fe98SUladzislau Rezki 
126168ad4a33SUladzislau Rezki (Sony) 	while (node) {
126268ad4a33SUladzislau Rezki (Sony) 		va = rb_entry(node, struct vmap_area, rb_node);
126368ad4a33SUladzislau Rezki (Sony) 
12649333fe98SUladzislau Rezki 		if (get_subtree_max_size(node->rb_left) >= length &&
126568ad4a33SUladzislau Rezki (Sony) 				vstart < va->va_start) {
126668ad4a33SUladzislau Rezki (Sony) 			node = node->rb_left;
126768ad4a33SUladzislau Rezki (Sony) 		} else {
126868ad4a33SUladzislau Rezki (Sony) 			if (is_within_this_va(va, size, align, vstart))
126968ad4a33SUladzislau Rezki (Sony) 				return va;
127068ad4a33SUladzislau Rezki (Sony) 
127168ad4a33SUladzislau Rezki (Sony) 			/*
127268ad4a33SUladzislau Rezki (Sony) 			 * It does not make sense to go deeper towards the right
127368ad4a33SUladzislau Rezki (Sony) 			 * sub-tree if it does not have a free block that is
12749333fe98SUladzislau Rezki 			 * equal to or bigger than the requested search length.
127568ad4a33SUladzislau Rezki (Sony) 			 */
12769333fe98SUladzislau Rezki 			if (get_subtree_max_size(node->rb_right) >= length) {
127768ad4a33SUladzislau Rezki (Sony) 				node = node->rb_right;
127868ad4a33SUladzislau Rezki (Sony) 				continue;
127968ad4a33SUladzislau Rezki (Sony) 			}
128068ad4a33SUladzislau Rezki (Sony) 
128168ad4a33SUladzislau Rezki (Sony) 			/*
12823806b041SAndrew Morton 			 * OK. We roll back and find the first right sub-tree
128368ad4a33SUladzislau Rezki (Sony) 			 * that will satisfy the search criteria. It can happen
12849f531973SUladzislau Rezki (Sony) 			 * due to a "vstart" restriction or an alignment overhead
12859f531973SUladzislau Rezki (Sony) 			 * that is bigger than PAGE_SIZE.
128668ad4a33SUladzislau Rezki (Sony) 			 */
128768ad4a33SUladzislau Rezki (Sony) 			while ((node = rb_parent(node))) {
128868ad4a33SUladzislau Rezki (Sony) 				va = rb_entry(node, struct vmap_area, rb_node);
128968ad4a33SUladzislau Rezki (Sony) 				if (is_within_this_va(va, size, align, vstart))
129068ad4a33SUladzislau Rezki (Sony) 					return va;
129168ad4a33SUladzislau Rezki (Sony) 
12929333fe98SUladzislau Rezki 				if (get_subtree_max_size(node->rb_right) >= length &&
129368ad4a33SUladzislau Rezki (Sony) 						vstart <= va->va_start) {
12949f531973SUladzislau Rezki (Sony) 					/*
12959f531973SUladzislau Rezki (Sony) 					 * Shift the vstart forward. Please note, we update it with
12969f531973SUladzislau Rezki (Sony) 					 * the parent's start address adding "1", because we do not
12979f531973SUladzislau Rezki (Sony) 					 * want to enter the same sub-tree after it has already been
12989f531973SUladzislau Rezki (Sony) 					 * checked and no suitable free block was found there.
12999f531973SUladzislau Rezki (Sony) 					 */
13009f531973SUladzislau Rezki (Sony) 					vstart = va->va_start + 1;
130168ad4a33SUladzislau Rezki (Sony) 					node = node->rb_right;
130268ad4a33SUladzislau Rezki (Sony) 					break;
130368ad4a33SUladzislau Rezki (Sony) 				}
130468ad4a33SUladzislau Rezki (Sony) 			}
130568ad4a33SUladzislau Rezki (Sony) 		}
130668ad4a33SUladzislau Rezki (Sony) 	}
130768ad4a33SUladzislau Rezki (Sony) 
130868ad4a33SUladzislau Rezki (Sony) 	return NULL;
130968ad4a33SUladzislau Rezki (Sony) }
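/*
 * Worked example (editorial illustration, not part of the original
 * file): for size == 2 * PAGE_SIZE, align == 16 * PAGE_SIZE and
 * adjust_search_size == true, the search length becomes
 * 18 * PAGE_SIZE - 1. A free block of exactly two pages may start
 * at an address that is not 16-page aligned, so only sub-trees able
 * to absorb that worst-case alignment padding are descended into.
 */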
131068ad4a33SUladzislau Rezki (Sony) 
1311a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1312a6cf4e0fSUladzislau Rezki (Sony) #include <linux/random.h>
1313a6cf4e0fSUladzislau Rezki (Sony) 
1314a6cf4e0fSUladzislau Rezki (Sony) static struct vmap_area *
1315bd1264c3SSong Liu find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
1316a6cf4e0fSUladzislau Rezki (Sony) 	unsigned long align, unsigned long vstart)
1317a6cf4e0fSUladzislau Rezki (Sony) {
1318a6cf4e0fSUladzislau Rezki (Sony) 	struct vmap_area *va;
1319a6cf4e0fSUladzislau Rezki (Sony) 
1320bd1264c3SSong Liu 	list_for_each_entry(va, head, list) {
1321a6cf4e0fSUladzislau Rezki (Sony) 		if (!is_within_this_va(va, size, align, vstart))
1322a6cf4e0fSUladzislau Rezki (Sony) 			continue;
1323a6cf4e0fSUladzislau Rezki (Sony) 
1324a6cf4e0fSUladzislau Rezki (Sony) 		return va;
1325a6cf4e0fSUladzislau Rezki (Sony) 	}
1326a6cf4e0fSUladzislau Rezki (Sony) 
1327a6cf4e0fSUladzislau Rezki (Sony) 	return NULL;
1328a6cf4e0fSUladzislau Rezki (Sony) }
1329a6cf4e0fSUladzislau Rezki (Sony) 
1330a6cf4e0fSUladzislau Rezki (Sony) static void
1331bd1264c3SSong Liu find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
1332bd1264c3SSong Liu 			     unsigned long size, unsigned long align)
1333a6cf4e0fSUladzislau Rezki (Sony) {
1334a6cf4e0fSUladzislau Rezki (Sony) 	struct vmap_area *va_1, *va_2;
1335a6cf4e0fSUladzislau Rezki (Sony) 	unsigned long vstart;
1336a6cf4e0fSUladzislau Rezki (Sony) 	unsigned int rnd;
1337a6cf4e0fSUladzislau Rezki (Sony) 
1338a6cf4e0fSUladzislau Rezki (Sony) 	get_random_bytes(&rnd, sizeof(rnd));
1339a6cf4e0fSUladzislau Rezki (Sony) 	vstart = VMALLOC_START + rnd;
1340a6cf4e0fSUladzislau Rezki (Sony) 
1341bd1264c3SSong Liu 	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
1342bd1264c3SSong Liu 	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
1343a6cf4e0fSUladzislau Rezki (Sony) 
1344a6cf4e0fSUladzislau Rezki (Sony) 	if (va_1 != va_2)
1345a6cf4e0fSUladzislau Rezki (Sony) 		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1346a6cf4e0fSUladzislau Rezki (Sony) 			va_1, va_2, vstart);
1347a6cf4e0fSUladzislau Rezki (Sony) }
1348a6cf4e0fSUladzislau Rezki (Sony) #endif
1349a6cf4e0fSUladzislau Rezki (Sony) 
135068ad4a33SUladzislau Rezki (Sony) enum fit_type {
135168ad4a33SUladzislau Rezki (Sony) 	NOTHING_FIT = 0,
135268ad4a33SUladzislau Rezki (Sony) 	FL_FIT_TYPE = 1,	/* full fit */
135368ad4a33SUladzislau Rezki (Sony) 	LE_FIT_TYPE = 2,	/* left edge fit */
135468ad4a33SUladzislau Rezki (Sony) 	RE_FIT_TYPE = 3,	/* right edge fit */
135568ad4a33SUladzislau Rezki (Sony) 	NE_FIT_TYPE = 4		/* no edge fit */
135668ad4a33SUladzislau Rezki (Sony) };
135768ad4a33SUladzislau Rezki (Sony) 
135868ad4a33SUladzislau Rezki (Sony) static __always_inline enum fit_type
135968ad4a33SUladzislau Rezki (Sony) classify_va_fit_type(struct vmap_area *va,
136068ad4a33SUladzislau Rezki (Sony) 	unsigned long nva_start_addr, unsigned long size)
136168ad4a33SUladzislau Rezki (Sony) {
136268ad4a33SUladzislau Rezki (Sony) 	enum fit_type type;
136368ad4a33SUladzislau Rezki (Sony) 
136468ad4a33SUladzislau Rezki (Sony) 	/* Check if it is within VA. */
136568ad4a33SUladzislau Rezki (Sony) 	if (nva_start_addr < va->va_start ||
136668ad4a33SUladzislau Rezki (Sony) 			nva_start_addr + size > va->va_end)
136768ad4a33SUladzislau Rezki (Sony) 		return NOTHING_FIT;
136868ad4a33SUladzislau Rezki (Sony) 
136968ad4a33SUladzislau Rezki (Sony) 	/* Now classify. */
137068ad4a33SUladzislau Rezki (Sony) 	if (va->va_start == nva_start_addr) {
137168ad4a33SUladzislau Rezki (Sony) 		if (va->va_end == nva_start_addr + size)
137268ad4a33SUladzislau Rezki (Sony) 			type = FL_FIT_TYPE;
137368ad4a33SUladzislau Rezki (Sony) 		else
137468ad4a33SUladzislau Rezki (Sony) 			type = LE_FIT_TYPE;
137568ad4a33SUladzislau Rezki (Sony) 	} else if (va->va_end == nva_start_addr + size) {
137668ad4a33SUladzislau Rezki (Sony) 		type = RE_FIT_TYPE;
137768ad4a33SUladzislau Rezki (Sony) 	} else {
137868ad4a33SUladzislau Rezki (Sony) 		type = NE_FIT_TYPE;
137968ad4a33SUladzislau Rezki (Sony) 	}
138068ad4a33SUladzislau Rezki (Sony) 
138168ad4a33SUladzislau Rezki (Sony) 	return type;
138268ad4a33SUladzislau Rezki (Sony) }
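/*
 * Worked example (editorial illustration, not part of the original
 * file): for a free VA covering [0x1000, 0x5000):
 *   - nva_start_addr == 0x1000, size == 0x4000 -> FL_FIT_TYPE;
 *   - nva_start_addr == 0x1000, size == 0x1000 -> LE_FIT_TYPE;
 *   - nva_start_addr == 0x4000, size == 0x1000 -> RE_FIT_TYPE;
 *   - nva_start_addr == 0x2000, size == 0x1000 -> NE_FIT_TYPE,
 *     i.e. both edges survive and the VA must be split in two.
 */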
138368ad4a33SUladzislau Rezki (Sony) 
138468ad4a33SUladzislau Rezki (Sony) static __always_inline int
1385f9863be4SUladzislau Rezki (Sony) adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
1386f9863be4SUladzislau Rezki (Sony) 		      struct vmap_area *va, unsigned long nva_start_addr,
1387f9863be4SUladzislau Rezki (Sony) 		      unsigned long size)
138868ad4a33SUladzislau Rezki (Sony) {
13892c929233SArnd Bergmann 	struct vmap_area *lva = NULL;
13901b23ff80SBaoquan He 	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
139168ad4a33SUladzislau Rezki (Sony) 
139268ad4a33SUladzislau Rezki (Sony) 	if (type == FL_FIT_TYPE) {
139368ad4a33SUladzislau Rezki (Sony) 		/*
139468ad4a33SUladzislau Rezki (Sony) 		 * No need to split VA, it fully fits.
139568ad4a33SUladzislau Rezki (Sony) 		 *
139668ad4a33SUladzislau Rezki (Sony) 		 * |               |
139768ad4a33SUladzislau Rezki (Sony) 		 * V      NVA      V
139868ad4a33SUladzislau Rezki (Sony) 		 * |---------------|
139968ad4a33SUladzislau Rezki (Sony) 		 */
1400f9863be4SUladzislau Rezki (Sony) 		unlink_va_augment(va, root);
140168ad4a33SUladzislau Rezki (Sony) 		kmem_cache_free(vmap_area_cachep, va);
140268ad4a33SUladzislau Rezki (Sony) 	} else if (type == LE_FIT_TYPE) {
140368ad4a33SUladzislau Rezki (Sony) 		/*
140468ad4a33SUladzislau Rezki (Sony) 		 * Split left edge of fit VA.
140568ad4a33SUladzislau Rezki (Sony) 		 *
140668ad4a33SUladzislau Rezki (Sony) 		 * |       |
140768ad4a33SUladzislau Rezki (Sony) 		 * V  NVA  V   R
140868ad4a33SUladzislau Rezki (Sony) 		 * |-------|-------|
140968ad4a33SUladzislau Rezki (Sony) 		 */
141068ad4a33SUladzislau Rezki (Sony) 		va->va_start += size;
141168ad4a33SUladzislau Rezki (Sony) 	} else if (type == RE_FIT_TYPE) {
141268ad4a33SUladzislau Rezki (Sony) 		/*
141368ad4a33SUladzislau Rezki (Sony) 		 * Split right edge of fit VA.
141468ad4a33SUladzislau Rezki (Sony) 		 *
141568ad4a33SUladzislau Rezki (Sony) 		 *         |       |
141668ad4a33SUladzislau Rezki (Sony) 		 *     L   V  NVA  V
141768ad4a33SUladzislau Rezki (Sony) 		 * |-------|-------|
141868ad4a33SUladzislau Rezki (Sony) 		 */
141968ad4a33SUladzislau Rezki (Sony) 		va->va_end = nva_start_addr;
142068ad4a33SUladzislau Rezki (Sony) 	} else if (type == NE_FIT_TYPE) {
142168ad4a33SUladzislau Rezki (Sony) 		/*
142268ad4a33SUladzislau Rezki (Sony) 		 * Split no edge of fit VA.
142368ad4a33SUladzislau Rezki (Sony) 		 *
142468ad4a33SUladzislau Rezki (Sony) 		 *     |       |
142568ad4a33SUladzislau Rezki (Sony) 		 *   L V  NVA  V R
142668ad4a33SUladzislau Rezki (Sony) 		 * |---|-------|---|
142768ad4a33SUladzislau Rezki (Sony) 		 */
142882dd23e8SUladzislau Rezki (Sony) 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
142982dd23e8SUladzislau Rezki (Sony) 		if (unlikely(!lva)) {
143082dd23e8SUladzislau Rezki (Sony) 			/*
143182dd23e8SUladzislau Rezki (Sony) 			 * For the percpu allocator we do not do any
143282dd23e8SUladzislau Rezki (Sony) 			 * pre-allocation and leave it as it is. The reason is
143382dd23e8SUladzislau Rezki (Sony) 			 * that it most likely never ends up with NE_FIT_TYPE
143482dd23e8SUladzislau Rezki (Sony) 			 * splitting. In case of percpu allocations, offsets and
143582dd23e8SUladzislau Rezki (Sony) 			 * sizes are aligned to a fixed align request, i.e.
143682dd23e8SUladzislau Rezki (Sony) 			 * RE_FIT_TYPE and FL_FIT_TYPE are its main fitting
143782dd23e8SUladzislau Rezki (Sony) 			 * cases.
143882dd23e8SUladzislau Rezki (Sony) 			 *
143982dd23e8SUladzislau Rezki (Sony) 			 * There are a few exceptions though; one example is the
144082dd23e8SUladzislau Rezki (Sony) 			 * first allocation (early boot-up), when we have "one"
144182dd23e8SUladzislau Rezki (Sony) 			 * big free space that has to be split.
1442060650a2SUladzislau Rezki (Sony) 			 *
1443060650a2SUladzislau Rezki (Sony) 			 * We can also hit this path in case of regular "vmap"
1444060650a2SUladzislau Rezki (Sony) 			 * allocations, if "this" current CPU was not preloaded.
1445060650a2SUladzislau Rezki (Sony) 			 * See the comment in alloc_vmap_area() for why. If so,
1446060650a2SUladzislau Rezki (Sony) 			 * GFP_NOWAIT is used instead to get an extra object for
1447060650a2SUladzislau Rezki (Sony) 			 * split purposes. That is rare and most of the time
1448060650a2SUladzislau Rezki (Sony) 			 * does not occur.
1449060650a2SUladzislau Rezki (Sony) 			 *
1450060650a2SUladzislau Rezki (Sony) 			 * What happens if an allocation fails? Basically, an
1451060650a2SUladzislau Rezki (Sony) 			 * "overflow" path is triggered to purge lazily freed
1452060650a2SUladzislau Rezki (Sony) 			 * areas to free some memory, then the "retry" path is
1453060650a2SUladzislau Rezki (Sony) 			 * triggered to repeat one more time. See more details
1454060650a2SUladzislau Rezki (Sony) 			 * in the alloc_vmap_area() function.
145482dd23e8SUladzislau Rezki (Sony) 			 */
145568ad4a33SUladzislau Rezki (Sony) 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
145682dd23e8SUladzislau Rezki (Sony) 			if (!lva)
145768ad4a33SUladzislau Rezki (Sony) 				return -1;
145882dd23e8SUladzislau Rezki (Sony) 		}
145968ad4a33SUladzislau Rezki (Sony) 
146068ad4a33SUladzislau Rezki (Sony) 		/*
146168ad4a33SUladzislau Rezki (Sony) 		 * Build the remainder.
146268ad4a33SUladzislau Rezki (Sony) 		 */
146368ad4a33SUladzislau Rezki (Sony) 		lva->va_start = va->va_start;
146468ad4a33SUladzislau Rezki (Sony) 		lva->va_end = nva_start_addr;
146568ad4a33SUladzislau Rezki (Sony) 
146668ad4a33SUladzislau Rezki (Sony) 		/*
146768ad4a33SUladzislau Rezki (Sony) 		 * Shrink this VA to remaining size.
146868ad4a33SUladzislau Rezki (Sony) 		 */
146968ad4a33SUladzislau Rezki (Sony) 		va->va_start = nva_start_addr + size;
147068ad4a33SUladzislau Rezki (Sony) 	} else {
147168ad4a33SUladzislau Rezki (Sony) 		return -1;
147268ad4a33SUladzislau Rezki (Sony) 	}
147368ad4a33SUladzislau Rezki (Sony) 
147468ad4a33SUladzislau Rezki (Sony) 	if (type != FL_FIT_TYPE) {
147568ad4a33SUladzislau Rezki (Sony) 		augment_tree_propagate_from(va);
147668ad4a33SUladzislau Rezki (Sony) 
14772c929233SArnd Bergmann 		if (lva)	/* type == NE_FIT_TYPE */
1478f9863be4SUladzislau Rezki (Sony) 			insert_vmap_area_augment(lva, &va->rb_node, root, head);
147968ad4a33SUladzislau Rezki (Sony) 	}
148068ad4a33SUladzislau Rezki (Sony) 
148168ad4a33SUladzislau Rezki (Sony) 	return 0;
148268ad4a33SUladzislau Rezki (Sony) }
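/*
 * Continuing the worked example above (editorial illustration, not
 * part of the original file): an NE_FIT_TYPE cut of [0x2000, 0x3000)
 * out of the free VA [0x1000, 0x5000) builds "lva" covering
 * [0x1000, 0x2000) and shrinks "va" to [0x3000, 0x5000); the shrunk
 * "va" is re-propagated and "lva" is inserted as a new augmented node.
 */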
148368ad4a33SUladzislau Rezki (Sony) 
148468ad4a33SUladzislau Rezki (Sony) /*
148568ad4a33SUladzislau Rezki (Sony)  * Returns the start address of the newly allocated area on success.
148668ad4a33SUladzislau Rezki (Sony)  * Otherwise, "vend" is returned to indicate failure.
148768ad4a33SUladzislau Rezki (Sony)  */
148868ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
1489f9863be4SUladzislau Rezki (Sony) __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1490f9863be4SUladzislau Rezki (Sony) 	unsigned long size, unsigned long align,
1491cacca6baSUladzislau Rezki (Sony) 	unsigned long vstart, unsigned long vend)
149268ad4a33SUladzislau Rezki (Sony) {
14939333fe98SUladzislau Rezki 	bool adjust_search_size = true;
149468ad4a33SUladzislau Rezki (Sony) 	unsigned long nva_start_addr;
149568ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *va;
149668ad4a33SUladzislau Rezki (Sony) 	int ret;
149768ad4a33SUladzislau Rezki (Sony) 
14989333fe98SUladzislau Rezki 	/*
14999333fe98SUladzislau Rezki 	 * Do not adjust when:
15009333fe98SUladzislau Rezki 	 *   a) align <= PAGE_SIZE, because it does not make any sense.
15019333fe98SUladzislau Rezki 	 *      All blocks (their start addresses) are at least PAGE_SIZE
15029333fe98SUladzislau Rezki 	 *      aligned anyway;
15039333fe98SUladzislau Rezki 	 *   b) a short range where the requested size exactly matches the
15049333fe98SUladzislau Rezki 	 *      specified [vstart:vend] interval and the alignment > PAGE_SIZE.
15059333fe98SUladzislau Rezki 	 *      With an adjusted search length, the allocation would not succeed.
15069333fe98SUladzislau Rezki 	 */
15079333fe98SUladzislau Rezki 	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
15089333fe98SUladzislau Rezki 		adjust_search_size = false;
15099333fe98SUladzislau Rezki 
1510f9863be4SUladzislau Rezki (Sony) 	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
151168ad4a33SUladzislau Rezki (Sony) 	if (unlikely(!va))
151268ad4a33SUladzislau Rezki (Sony) 		return vend;
151368ad4a33SUladzislau Rezki (Sony) 
151468ad4a33SUladzislau Rezki (Sony) 	if (va->va_start > vstart)
151568ad4a33SUladzislau Rezki (Sony) 		nva_start_addr = ALIGN(va->va_start, align);
151668ad4a33SUladzislau Rezki (Sony) 	else
151768ad4a33SUladzislau Rezki (Sony) 		nva_start_addr = ALIGN(vstart, align);
151868ad4a33SUladzislau Rezki (Sony) 
151968ad4a33SUladzislau Rezki (Sony) 	/* Check the "vend" restriction. */
152068ad4a33SUladzislau Rezki (Sony) 	if (nva_start_addr + size > vend)
152168ad4a33SUladzislau Rezki (Sony) 		return vend;
152268ad4a33SUladzislau Rezki (Sony) 
152368ad4a33SUladzislau Rezki (Sony) 	/* Update the free vmap_area. */
1524f9863be4SUladzislau Rezki (Sony) 	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
15251b23ff80SBaoquan He 	if (WARN_ON_ONCE(ret))
152668ad4a33SUladzislau Rezki (Sony) 		return vend;
152768ad4a33SUladzislau Rezki (Sony) 
1528a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1529bd1264c3SSong Liu 	find_vmap_lowest_match_check(root, head, size, align);
1530a6cf4e0fSUladzislau Rezki (Sony) #endif
1531a6cf4e0fSUladzislau Rezki (Sony) 
153268ad4a33SUladzislau Rezki (Sony) 	return nva_start_addr;
153368ad4a33SUladzislau Rezki (Sony) }
15344da56b99SChris Wilson 
1535db64fe02SNick Piggin /*
1536d98c9e83SAndrey Ryabinin  * Free a region of KVA allocated by alloc_vmap_area
1537d98c9e83SAndrey Ryabinin  */
1538d98c9e83SAndrey Ryabinin static void free_vmap_area(struct vmap_area *va)
1539d98c9e83SAndrey Ryabinin {
1540d98c9e83SAndrey Ryabinin 	/*
1541d98c9e83SAndrey Ryabinin 	 * Remove from the busy tree/list.
1542d98c9e83SAndrey Ryabinin 	 */
1543d98c9e83SAndrey Ryabinin 	spin_lock(&vmap_area_lock);
1544d98c9e83SAndrey Ryabinin 	unlink_va(va, &vmap_area_root);
1545d98c9e83SAndrey Ryabinin 	spin_unlock(&vmap_area_lock);
1546d98c9e83SAndrey Ryabinin 
1547d98c9e83SAndrey Ryabinin 	/*
1548d98c9e83SAndrey Ryabinin 	 * Insert/Merge it back to the free tree/list.
1549d98c9e83SAndrey Ryabinin 	 */
1550d98c9e83SAndrey Ryabinin 	spin_lock(&free_vmap_area_lock);
155196e2db45SUladzislau Rezki (Sony) 	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1552d98c9e83SAndrey Ryabinin 	spin_unlock(&free_vmap_area_lock);
1553d98c9e83SAndrey Ryabinin }
1554d98c9e83SAndrey Ryabinin 
1555187f8cc4SUladzislau Rezki (Sony) static inline void
1556187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1557187f8cc4SUladzislau Rezki (Sony) {
1558187f8cc4SUladzislau Rezki (Sony) 	struct vmap_area *va = NULL;
1559187f8cc4SUladzislau Rezki (Sony) 
1560187f8cc4SUladzislau Rezki (Sony) 	/*
1561187f8cc4SUladzislau Rezki (Sony) 	 * Preload this CPU with one extra vmap_area object. It is used
1562187f8cc4SUladzislau Rezki (Sony) 	 * when the fit type of a free area is NE_FIT_TYPE. It guarantees
1563187f8cc4SUladzislau Rezki (Sony) 	 * that a CPU that does an allocation is preloaded.
1564187f8cc4SUladzislau Rezki (Sony) 	 *
1565187f8cc4SUladzislau Rezki (Sony) 	 * We do it in a non-atomic context, which allows us to use more
1566187f8cc4SUladzislau Rezki (Sony) 	 * permissive allocation masks and to be more stable under
1567187f8cc4SUladzislau Rezki (Sony) 	 * low-memory conditions and high memory pressure.
1568187f8cc4SUladzislau Rezki (Sony) 	 */
1569187f8cc4SUladzislau Rezki (Sony) 	if (!this_cpu_read(ne_fit_preload_node))
1570187f8cc4SUladzislau Rezki (Sony) 		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1571187f8cc4SUladzislau Rezki (Sony) 
1572187f8cc4SUladzislau Rezki (Sony) 	spin_lock(lock);
1573187f8cc4SUladzislau Rezki (Sony) 
1574187f8cc4SUladzislau Rezki (Sony) 	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1575187f8cc4SUladzislau Rezki (Sony) 		kmem_cache_free(vmap_area_cachep, va);
1576187f8cc4SUladzislau Rezki (Sony) }
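/*
 * Editorial note (not part of the original file): the
 * __this_cpu_cmpxchg() above makes the preload race-safe. The
 * ne_fit_preload_node slot can be filled by someone else between
 * the lockless this_cpu_read() and taking the lock (e.g. after a
 * migration); in that case the freshly allocated spare loses the
 * cmpxchg and is returned to the slab cache, so at most one spare
 * vmap_area is parked per CPU.
 */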
1577187f8cc4SUladzislau Rezki (Sony) 
1578d98c9e83SAndrey Ryabinin /*
1579db64fe02SNick Piggin  * Allocate a region of KVA of the specified size and alignment, within the
1580db64fe02SNick Piggin  * vstart and vend.
1581db64fe02SNick Piggin  */
1582db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size,
1583db64fe02SNick Piggin 				unsigned long align,
1584db64fe02SNick Piggin 				unsigned long vstart, unsigned long vend,
1585869176a0SBaoquan He 				int node, gfp_t gfp_mask,
1586869176a0SBaoquan He 				unsigned long va_flags)
1587db64fe02SNick Piggin {
1588187f8cc4SUladzislau Rezki (Sony) 	struct vmap_area *va;
158912e376a6SUladzislau Rezki (Sony) 	unsigned long freed;
15901da177e4SLinus Torvalds 	unsigned long addr;
1591db64fe02SNick Piggin 	int purged = 0;
1592d98c9e83SAndrey Ryabinin 	int ret;
1593db64fe02SNick Piggin 
15947e4a32c0SHyunmin Lee 	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
15957e4a32c0SHyunmin Lee 		return ERR_PTR(-EINVAL);
1596db64fe02SNick Piggin 
159768ad4a33SUladzislau Rezki (Sony) 	if (unlikely(!vmap_initialized))
159868ad4a33SUladzislau Rezki (Sony) 		return ERR_PTR(-EBUSY);
159968ad4a33SUladzislau Rezki (Sony) 
16005803ed29SChristoph Hellwig 	might_sleep();
1601f07116d7SUladzislau Rezki (Sony) 	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
16024da56b99SChris Wilson 
1603f07116d7SUladzislau Rezki (Sony) 	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1604db64fe02SNick Piggin 	if (unlikely(!va))
1605db64fe02SNick Piggin 		return ERR_PTR(-ENOMEM);
1606db64fe02SNick Piggin 
16077f88f88fSCatalin Marinas 	/*
16087f88f88fSCatalin Marinas 	 * Only scan the relevant parts containing pointers to other objects
16097f88f88fSCatalin Marinas 	 * to avoid false negatives.
16107f88f88fSCatalin Marinas 	 */
1611f07116d7SUladzislau Rezki (Sony) 	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
16127f88f88fSCatalin Marinas 
1613db64fe02SNick Piggin retry:
1614187f8cc4SUladzislau Rezki (Sony) 	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1615f9863be4SUladzislau Rezki (Sony) 	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
1616f9863be4SUladzislau Rezki (Sony) 		size, align, vstart, vend);
1617187f8cc4SUladzislau Rezki (Sony) 	spin_unlock(&free_vmap_area_lock);
161868ad4a33SUladzislau Rezki (Sony) 
1619cf243da6SUladzislau Rezki (Sony) 	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
1620cf243da6SUladzislau Rezki (Sony) 
162189699605SNick Piggin 	/*
162268ad4a33SUladzislau Rezki (Sony) 	 * If an allocation fails, the "vend" address is
162368ad4a33SUladzislau Rezki (Sony) 	 * returned. Therefore trigger the overflow path.
162489699605SNick Piggin 	 */
162568ad4a33SUladzislau Rezki (Sony) 	if (unlikely(addr == vend))
162689699605SNick Piggin 		goto overflow;
162789699605SNick Piggin 
162889699605SNick Piggin 	va->va_start = addr;
162989699605SNick Piggin 	va->va_end = addr + size;
1630688fcbfcSPengfei Li 	va->vm = NULL;
1631869176a0SBaoquan He 	va->flags = va_flags;
163268ad4a33SUladzislau Rezki (Sony) 
1633e36176beSUladzislau Rezki (Sony) 	spin_lock(&vmap_area_lock);
1634e36176beSUladzislau Rezki (Sony) 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
163589699605SNick Piggin 	spin_unlock(&vmap_area_lock);
163689699605SNick Piggin 
163761e16557SWang Xiaoqiang 	BUG_ON(!IS_ALIGNED(va->va_start, align));
163889699605SNick Piggin 	BUG_ON(va->va_start < vstart);
163989699605SNick Piggin 	BUG_ON(va->va_end > vend);
164089699605SNick Piggin 
1641d98c9e83SAndrey Ryabinin 	ret = kasan_populate_vmalloc(addr, size);
1642d98c9e83SAndrey Ryabinin 	if (ret) {
1643d98c9e83SAndrey Ryabinin 		free_vmap_area(va);
1644d98c9e83SAndrey Ryabinin 		return ERR_PTR(ret);
1645d98c9e83SAndrey Ryabinin 	}
1646d98c9e83SAndrey Ryabinin 
164789699605SNick Piggin 	return va;
164889699605SNick Piggin 
16497766970cSNick Piggin overflow:
1650db64fe02SNick Piggin 	if (!purged) {
165177e50af0SThomas Gleixner 		reclaim_and_purge_vmap_areas();
1652db64fe02SNick Piggin 		purged = 1;
1653db64fe02SNick Piggin 		goto retry;
1654db64fe02SNick Piggin 	}
16554da56b99SChris Wilson 
165612e376a6SUladzislau Rezki (Sony) 	freed = 0;
16574da56b99SChris Wilson 	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
165812e376a6SUladzislau Rezki (Sony) 
16594da56b99SChris Wilson 	if (freed > 0) {
16604da56b99SChris Wilson 		purged = 0;
16614da56b99SChris Wilson 		goto retry;
16624da56b99SChris Wilson 	}
16634da56b99SChris Wilson 
166403497d76SFlorian Fainelli 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1665756a025fSJoe Perches 		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1666756a025fSJoe Perches 			size);
166768ad4a33SUladzislau Rezki (Sony) 
166868ad4a33SUladzislau Rezki (Sony) 	kmem_cache_free(vmap_area_cachep, va);
1669db64fe02SNick Piggin 	return ERR_PTR(-EBUSY);
1670db64fe02SNick Piggin }
1671db64fe02SNick Piggin 
16724da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb)
16734da56b99SChris Wilson {
16744da56b99SChris Wilson 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
16754da56b99SChris Wilson }
16764da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
16774da56b99SChris Wilson 
16784da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb)
16794da56b99SChris Wilson {
16804da56b99SChris Wilson 	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
16814da56b99SChris Wilson }
16824da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
16834da56b99SChris Wilson 
1684db64fe02SNick Piggin /*
1685db64fe02SNick Piggin  * lazy_max_pages is the maximum amount of virtual address space we gather up
1686db64fe02SNick Piggin  * before attempting to purge with a TLB flush.
1687db64fe02SNick Piggin  *
1688db64fe02SNick Piggin  * There is a tradeoff here: a larger number will cover more kernel page tables
1689db64fe02SNick Piggin  * and take slightly longer to purge, but it will linearly reduce the number of
1690db64fe02SNick Piggin  * global TLB flushes that must be performed. It would seem natural to scale
1691db64fe02SNick Piggin  * this number up linearly with the number of CPUs (because vmapping activity
1692db64fe02SNick Piggin  * could also scale linearly with the number of CPUs), however it is likely
1693db64fe02SNick Piggin  * that in practice, workloads might be constrained in other ways that mean
1694db64fe02SNick Piggin  * vmap activity will not scale linearly with CPUs. Also, I want to be
1695db64fe02SNick Piggin  * conservative and not introduce a big latency on huge systems, so go with
1696db64fe02SNick Piggin  * a less aggressive log scale. It will still be an improvement over the old
1697db64fe02SNick Piggin  * code, and it will be simple to change the scale factor if we find that it
1698db64fe02SNick Piggin  * becomes a problem on bigger systems.
1699db64fe02SNick Piggin  */
1700db64fe02SNick Piggin static unsigned long lazy_max_pages(void)
1701db64fe02SNick Piggin {
1702db64fe02SNick Piggin 	unsigned int log;
1703db64fe02SNick Piggin 
1704db64fe02SNick Piggin 	log = fls(num_online_cpus());
1705db64fe02SNick Piggin 
1706db64fe02SNick Piggin 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1707db64fe02SNick Piggin }
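/*
 * Hedged sketch (editorial, not part of the original file): the same
 * log scaling written out for an arbitrary CPU count. fls() comes
 * from <linux/bitops.h>, which is already included above. With
 * 4 CPUs and 4K pages, fls(4) == 3, so up to 3 * (32MB / 4K) ==
 * 24576 lazily freed pages (96MB of VA) may gather before a purge.
 */
static __maybe_unused unsigned long example_lazy_max_pages(unsigned int cpus)
{
	/* Log-scaled, mirroring lazy_max_pages() above. */
	return fls(cpus) * (32UL * 1024 * 1024 / PAGE_SIZE);
}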
1708db64fe02SNick Piggin 
17094d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1710db64fe02SNick Piggin 
17110574ecd1SChristoph Hellwig /*
1712f0953a1bSIngo Molnar  * Serialize vmap purging.  There is no actual critical section protected
1713153090f2SBaoquan He  * by this lock, but we want to avoid concurrent calls for performance
17140574ecd1SChristoph Hellwig  * reasons and to make pcpu_get_vm_areas() more deterministic.
17150574ecd1SChristoph Hellwig  */
1716f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock);
17170574ecd1SChristoph Hellwig 
171802b709dfSNick Piggin /* for per-CPU blocks */
171902b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void);
172002b709dfSNick Piggin 
17213ee48b6aSCliff Wickman /*
1722db64fe02SNick Piggin  * Purges all lazily-freed vmap areas.
1723db64fe02SNick Piggin  */
17240574ecd1SChristoph Hellwig static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1725db64fe02SNick Piggin {
17264d36e6f8SUladzislau Rezki (Sony) 	unsigned long resched_threshold;
17276030fd5fSUladzislau Rezki (Sony) 	unsigned int num_purged_areas = 0;
1728baa468a6SBaoquan He 	struct list_head local_purge_list;
172996e2db45SUladzislau Rezki (Sony) 	struct vmap_area *va, *n_va;
1730db64fe02SNick Piggin 
17310574ecd1SChristoph Hellwig 	lockdep_assert_held(&vmap_purge_lock);
173202b709dfSNick Piggin 
173396e2db45SUladzislau Rezki (Sony) 	spin_lock(&purge_vmap_area_lock);
173496e2db45SUladzislau Rezki (Sony) 	purge_vmap_area_root = RB_ROOT;
1735baa468a6SBaoquan He 	list_replace_init(&purge_vmap_area_list, &local_purge_list);
173696e2db45SUladzislau Rezki (Sony) 	spin_unlock(&purge_vmap_area_lock);
173796e2db45SUladzislau Rezki (Sony) 
1738baa468a6SBaoquan He 	if (unlikely(list_empty(&local_purge_list)))
17396030fd5fSUladzislau Rezki (Sony) 		goto out;
174068571be9SUladzislau Rezki (Sony) 
174196e2db45SUladzislau Rezki (Sony) 	start = min(start,
1742baa468a6SBaoquan He 		list_first_entry(&local_purge_list,
174396e2db45SUladzislau Rezki (Sony) 			struct vmap_area, list)->va_start);
174496e2db45SUladzislau Rezki (Sony) 
174596e2db45SUladzislau Rezki (Sony) 	end = max(end,
1746baa468a6SBaoquan He 		list_last_entry(&local_purge_list,
174796e2db45SUladzislau Rezki (Sony) 			struct vmap_area, list)->va_end);
1748db64fe02SNick Piggin 
17490574ecd1SChristoph Hellwig 	flush_tlb_kernel_range(start, end);
17504d36e6f8SUladzislau Rezki (Sony) 	resched_threshold = lazy_max_pages() << 1;
1751db64fe02SNick Piggin 
1752e36176beSUladzislau Rezki (Sony) 	spin_lock(&free_vmap_area_lock);
1753baa468a6SBaoquan He 	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
17544d36e6f8SUladzislau Rezki (Sony) 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
17553c5c3cfbSDaniel Axtens 		unsigned long orig_start = va->va_start;
17563c5c3cfbSDaniel Axtens 		unsigned long orig_end = va->va_end;
1757763b218dSJoel Fernandes 
1758dd3b8353SUladzislau Rezki (Sony) 		/*
1759dd3b8353SUladzislau Rezki (Sony) 		 * Finally, insert or merge the lazily-freed area. It is
1760dd3b8353SUladzislau Rezki (Sony) 		 * detached and there is no need to "unlink" it from
1761dd3b8353SUladzislau Rezki (Sony) 		 * anything.
1762dd3b8353SUladzislau Rezki (Sony) 		 */
176396e2db45SUladzislau Rezki (Sony) 		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
17643c5c3cfbSDaniel Axtens 				&free_vmap_area_list);
17653c5c3cfbSDaniel Axtens 
17669c801f61SUladzislau Rezki (Sony) 		if (!va)
17679c801f61SUladzislau Rezki (Sony) 			continue;
17689c801f61SUladzislau Rezki (Sony) 
17693c5c3cfbSDaniel Axtens 		if (is_vmalloc_or_module_addr((void *)orig_start))
17703c5c3cfbSDaniel Axtens 			kasan_release_vmalloc(orig_start, orig_end,
17713c5c3cfbSDaniel Axtens 					      va->va_start, va->va_end);
1772dd3b8353SUladzislau Rezki (Sony) 
17734d36e6f8SUladzislau Rezki (Sony) 		atomic_long_sub(nr, &vmap_lazy_nr);
17746030fd5fSUladzislau Rezki (Sony) 		num_purged_areas++;
177568571be9SUladzislau Rezki (Sony) 
17764d36e6f8SUladzislau Rezki (Sony) 		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1777e36176beSUladzislau Rezki (Sony) 			cond_resched_lock(&free_vmap_area_lock);
1778763b218dSJoel Fernandes 	}
1779e36176beSUladzislau Rezki (Sony) 	spin_unlock(&free_vmap_area_lock);
17806030fd5fSUladzislau Rezki (Sony) 
17816030fd5fSUladzislau Rezki (Sony) out:
17826030fd5fSUladzislau Rezki (Sony) 	trace_purge_vmap_area_lazy(start, end, num_purged_areas);
17836030fd5fSUladzislau Rezki (Sony) 	return num_purged_areas > 0;
1784db64fe02SNick Piggin }
1785db64fe02SNick Piggin 
1786db64fe02SNick Piggin /*
178777e50af0SThomas Gleixner  * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
1788db64fe02SNick Piggin  */
178977e50af0SThomas Gleixner static void reclaim_and_purge_vmap_areas(void)
179077e50af0SThomas Gleixner 
1791db64fe02SNick Piggin {
1792f9e09977SChristoph Hellwig 	mutex_lock(&vmap_purge_lock);
17930574ecd1SChristoph Hellwig 	purge_fragmented_blocks_allcpus();
17940574ecd1SChristoph Hellwig 	__purge_vmap_area_lazy(ULONG_MAX, 0);
1795f9e09977SChristoph Hellwig 	mutex_unlock(&vmap_purge_lock);
1796db64fe02SNick Piggin }
1797db64fe02SNick Piggin 
1798690467c8SUladzislau Rezki (Sony) static void drain_vmap_area_work(struct work_struct *work)
1799690467c8SUladzislau Rezki (Sony) {
1800690467c8SUladzislau Rezki (Sony) 	unsigned long nr_lazy;
1801690467c8SUladzislau Rezki (Sony) 
1802690467c8SUladzislau Rezki (Sony) 	do {
1803690467c8SUladzislau Rezki (Sony) 		mutex_lock(&vmap_purge_lock);
1804690467c8SUladzislau Rezki (Sony) 		__purge_vmap_area_lazy(ULONG_MAX, 0);
1805690467c8SUladzislau Rezki (Sony) 		mutex_unlock(&vmap_purge_lock);
1806690467c8SUladzislau Rezki (Sony) 
1807690467c8SUladzislau Rezki (Sony) 		/* Recheck if further work is required. */
1808690467c8SUladzislau Rezki (Sony) 		nr_lazy = atomic_long_read(&vmap_lazy_nr);
1809690467c8SUladzislau Rezki (Sony) 	} while (nr_lazy > lazy_max_pages());
1810690467c8SUladzislau Rezki (Sony) }
1811690467c8SUladzislau Rezki (Sony) 
1812db64fe02SNick Piggin /*
1813edd89818SUladzislau Rezki (Sony)  * Free a vmap area, with the caller ensuring that the area has been
1814edd89818SUladzislau Rezki (Sony)  * unmapped and unlinked, and that flush_cache_vunmap() has been called
1815edd89818SUladzislau Rezki (Sony)  * for the correct range previously.
1816db64fe02SNick Piggin  */
181764141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va)
1818db64fe02SNick Piggin {
18198c4196feSUladzislau Rezki (Sony) 	unsigned long nr_lazy_max = lazy_max_pages();
18208c4196feSUladzislau Rezki (Sony) 	unsigned long va_start = va->va_start;
18214d36e6f8SUladzislau Rezki (Sony) 	unsigned long nr_lazy;
182280c4bd7aSChris Wilson 
1823edd89818SUladzislau Rezki (Sony) 	if (WARN_ON_ONCE(!list_empty(&va->list)))
1824edd89818SUladzislau Rezki (Sony) 		return;
1825dd3b8353SUladzislau Rezki (Sony) 
18264d36e6f8SUladzislau Rezki (Sony) 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
18274d36e6f8SUladzislau Rezki (Sony) 				PAGE_SHIFT, &vmap_lazy_nr);
182880c4bd7aSChris Wilson 
182996e2db45SUladzislau Rezki (Sony) 	/*
183096e2db45SUladzislau Rezki (Sony) 	 * Merge or place it into the purge tree/list.
183196e2db45SUladzislau Rezki (Sony) 	 */
183296e2db45SUladzislau Rezki (Sony) 	spin_lock(&purge_vmap_area_lock);
183396e2db45SUladzislau Rezki (Sony) 	merge_or_add_vmap_area(va,
183496e2db45SUladzislau Rezki (Sony) 		&purge_vmap_area_root, &purge_vmap_area_list);
183596e2db45SUladzislau Rezki (Sony) 	spin_unlock(&purge_vmap_area_lock);
183680c4bd7aSChris Wilson 
18378c4196feSUladzislau Rezki (Sony) 	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
18388c4196feSUladzislau Rezki (Sony) 
183996e2db45SUladzislau Rezki (Sony) 	/* After this point, we may free va at any time */
18408c4196feSUladzislau Rezki (Sony) 	if (unlikely(nr_lazy > nr_lazy_max))
1841690467c8SUladzislau Rezki (Sony) 		schedule_work(&drain_vmap_work);
1842db64fe02SNick Piggin }
1843db64fe02SNick Piggin 
1844b29acbdcSNick Piggin /*
1845b29acbdcSNick Piggin  * Free and unmap a vmap area
1846b29acbdcSNick Piggin  */
1847b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va)
1848b29acbdcSNick Piggin {
1849b29acbdcSNick Piggin 	flush_cache_vunmap(va->va_start, va->va_end);
18504ad0ae8cSNicholas Piggin 	vunmap_range_noflush(va->va_start, va->va_end);
18518e57f8acSVlastimil Babka 	if (debug_pagealloc_enabled_static())
185282a2e924SChintan Pandya 		flush_tlb_kernel_range(va->va_start, va->va_end);
185382a2e924SChintan Pandya 
1854c8eef01eSChristoph Hellwig 	free_vmap_area_noflush(va);
1855b29acbdcSNick Piggin }
1856b29acbdcSNick Piggin 
1857993d0b28SMatthew Wilcox (Oracle) struct vmap_area *find_vmap_area(unsigned long addr)
1858db64fe02SNick Piggin {
1859db64fe02SNick Piggin 	struct vmap_area *va;
1860db64fe02SNick Piggin 
1861db64fe02SNick Piggin 	spin_lock(&vmap_area_lock);
1862899c6efeSUladzislau Rezki (Sony) 	va = __find_vmap_area(addr, &vmap_area_root);
1863db64fe02SNick Piggin 	spin_unlock(&vmap_area_lock);
1864db64fe02SNick Piggin 
1865db64fe02SNick Piggin 	return va;
1866db64fe02SNick Piggin }
1867db64fe02SNick Piggin 
1868edd89818SUladzislau Rezki (Sony) static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
1869edd89818SUladzislau Rezki (Sony) {
1870edd89818SUladzislau Rezki (Sony) 	struct vmap_area *va;
1871edd89818SUladzislau Rezki (Sony) 
1872edd89818SUladzislau Rezki (Sony) 	spin_lock(&vmap_area_lock);
1873edd89818SUladzislau Rezki (Sony) 	va = __find_vmap_area(addr, &vmap_area_root);
1874edd89818SUladzislau Rezki (Sony) 	if (va)
1875edd89818SUladzislau Rezki (Sony) 		unlink_va(va, &vmap_area_root);
1876edd89818SUladzislau Rezki (Sony) 	spin_unlock(&vmap_area_lock);
1877edd89818SUladzislau Rezki (Sony) 
1878edd89818SUladzislau Rezki (Sony) 	return va;
1879edd89818SUladzislau Rezki (Sony) }
1880edd89818SUladzislau Rezki (Sony) 
1881db64fe02SNick Piggin /*** Per cpu kva allocator ***/
1882db64fe02SNick Piggin 
1883db64fe02SNick Piggin /*
1884db64fe02SNick Piggin  * vmap space is limited especially on 32 bit architectures. Ensure there is
1885db64fe02SNick Piggin  * room for at least 16 percpu vmap blocks per CPU.
1886db64fe02SNick Piggin  */
1887db64fe02SNick Piggin /*
1888db64fe02SNick Piggin  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1889db64fe02SNick Piggin  * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
1890db64fe02SNick Piggin  * instead (we just need a rough idea)
1891db64fe02SNick Piggin  */
1892db64fe02SNick Piggin #if BITS_PER_LONG == 32
1893db64fe02SNick Piggin #define VMALLOC_SPACE		(128UL*1024*1024)
1894db64fe02SNick Piggin #else
1895db64fe02SNick Piggin #define VMALLOC_SPACE		(128UL*1024*1024*1024)
1896db64fe02SNick Piggin #endif
1897db64fe02SNick Piggin 
1898db64fe02SNick Piggin #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1899db64fe02SNick Piggin #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1900db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1901db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1902db64fe02SNick Piggin #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1903db64fe02SNick Piggin #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1904f982f915SClemens Ladisch #define VMAP_BBMAP_BITS		\
1905f982f915SClemens Ladisch 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1906db64fe02SNick Piggin 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1907f982f915SClemens Ladisch 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1908db64fe02SNick Piggin 
1909db64fe02SNick Piggin #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
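/*
 * Worked example (editorial illustration, not part of the original
 * file): on a 64-bit system with 4K pages and NR_CPUS == 64,
 * VMALLOC_PAGES / 64 / 16 == 32768, which VMAP_MIN() clamps to
 * VMAP_BBMAP_BITS_MAX == 1024, so VMAP_BLOCK_SIZE becomes
 * 1024 * 4K == 4MB, matching the "4MB with 4K pages" note above.
 */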
1910db64fe02SNick Piggin 
191177e50af0SThomas Gleixner /*
191277e50af0SThomas Gleixner  * Purge threshold to prevent overeager purging of fragmented blocks for
191377e50af0SThomas Gleixner  * regular operations: Purge if vb->free is less than 1/4 of the capacity.
191477e50af0SThomas Gleixner  */
191577e50af0SThomas Gleixner #define VMAP_PURGE_THRESHOLD	(VMAP_BBMAP_BITS / 4)
191677e50af0SThomas Gleixner 
1917869176a0SBaoquan He #define VMAP_RAM		0x1 /* indicates vm_map_ram area*/
1918869176a0SBaoquan He #define VMAP_BLOCK		0x2 /* mark out the vmap_block sub-type*/
1919869176a0SBaoquan He #define VMAP_FLAGS_MASK		0x3
1920869176a0SBaoquan He 
1921db64fe02SNick Piggin struct vmap_block_queue {
1922db64fe02SNick Piggin 	spinlock_t lock;
1923db64fe02SNick Piggin 	struct list_head free;
1924062eacf5SUladzislau Rezki (Sony) 
1925062eacf5SUladzislau Rezki (Sony) 	/*
1926062eacf5SUladzislau Rezki (Sony) 	 * An xarray requires an extra memory dynamically to
1927062eacf5SUladzislau Rezki (Sony) 	 * be allocated. If it is an issue, we can use rb-tree
1928062eacf5SUladzislau Rezki (Sony) 	 * instead.
1929062eacf5SUladzislau Rezki (Sony) 	 */
1930062eacf5SUladzislau Rezki (Sony) 	struct xarray vmap_blocks;
1931db64fe02SNick Piggin };
1932db64fe02SNick Piggin 
1933db64fe02SNick Piggin struct vmap_block {
1934db64fe02SNick Piggin 	spinlock_t lock;
1935db64fe02SNick Piggin 	struct vmap_area *va;
1936db64fe02SNick Piggin 	unsigned long free, dirty;
1937d76f9954SBaoquan He 	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
19387d61bfe8SRoman Pen 	unsigned long dirty_min, dirty_max; /*< dirty range */
1939db64fe02SNick Piggin 	struct list_head free_list;
1940db64fe02SNick Piggin 	struct rcu_head rcu_head;
194102b709dfSNick Piggin 	struct list_head purge;
1942db64fe02SNick Piggin };
1943db64fe02SNick Piggin 
1944db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1945db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1946db64fe02SNick Piggin 
1947db64fe02SNick Piggin /*
1948062eacf5SUladzislau Rezki (Sony)  * In order to have fast access to any "vmap_block" associated with a
1949062eacf5SUladzislau Rezki (Sony)  * specific address, we use a hash.
1950062eacf5SUladzislau Rezki (Sony)  *
1951062eacf5SUladzislau Rezki (Sony)  * A per-cpu vmap_block_queue is used in two ways: to serialize
1952062eacf5SUladzislau Rezki (Sony)  * access to the free block chains among CPUs (alloc path), and it
1953062eacf5SUladzislau Rezki (Sony)  * also acts as a vmap_block hash (alloc/free paths). That means we
1954062eacf5SUladzislau Rezki (Sony)  * overload it, since we already have the per-cpu array which is
1955062eacf5SUladzislau Rezki (Sony)  * used as a hash table. When used as a hash, the 'cpu' passed to
1956062eacf5SUladzislau Rezki (Sony)  * per_cpu() is not actually a CPU but rather a hash index.
1957062eacf5SUladzislau Rezki (Sony)  *
1958fa1c77c1SUladzislau Rezki (Sony)  * The hash function is addr_to_vb_xa(), which hashes any address
1959062eacf5SUladzislau Rezki (Sony)  * to the specific hash index it belongs to. The per_cpu() macro is
1960062eacf5SUladzislau Rezki (Sony)  * then used to access the array with the generated index.
1961062eacf5SUladzislau Rezki (Sony)  *
1962062eacf5SUladzislau Rezki (Sony)  * An example:
1963062eacf5SUladzislau Rezki (Sony)  *
1964062eacf5SUladzislau Rezki (Sony)  *  CPU_1  CPU_2  CPU_0
1965062eacf5SUladzislau Rezki (Sony)  *    |      |      |
1966062eacf5SUladzislau Rezki (Sony)  *    V      V      V
1967062eacf5SUladzislau Rezki (Sony)  * 0     10     20     30     40     50     60
1968062eacf5SUladzislau Rezki (Sony)  * |------|------|------|------|------|------|...<vmap address space>
1969062eacf5SUladzislau Rezki (Sony)  *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
1970062eacf5SUladzislau Rezki (Sony)  *
1971062eacf5SUladzislau Rezki (Sony)  * - CPU_1 invokes vm_unmap_ram(6); 6 belongs to the CPU0 zone, thus
1972062eacf5SUladzislau Rezki (Sony)  *   it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
1973062eacf5SUladzislau Rezki (Sony)  *
1974062eacf5SUladzislau Rezki (Sony)  * - CPU_2 invokes vm_unmap_ram(11); 11 belongs to the CPU1 zone, thus
1975062eacf5SUladzislau Rezki (Sony)  *   it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
1976062eacf5SUladzislau Rezki (Sony)  *
1977062eacf5SUladzislau Rezki (Sony)  * - CPU_0 invokes vm_unmap_ram(20); 20 belongs to the CPU2 zone, thus
1978062eacf5SUladzislau Rezki (Sony)  *   it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
1979062eacf5SUladzislau Rezki (Sony)  *
1980062eacf5SUladzislau Rezki (Sony)  * This technique almost always avoids lock contention on insert/remove,
1981062eacf5SUladzislau Rezki (Sony)  * however xarray spinlocks protect against any contention that remains.
1982db64fe02SNick Piggin  */
1983062eacf5SUladzislau Rezki (Sony) static struct xarray *
1984fa1c77c1SUladzislau Rezki (Sony) addr_to_vb_xa(unsigned long addr)
1985062eacf5SUladzislau Rezki (Sony) {
1986062eacf5SUladzislau Rezki (Sony) 	int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
1987062eacf5SUladzislau Rezki (Sony) 
1988062eacf5SUladzislau Rezki (Sony) 	return &per_cpu(vmap_block_queue, index).vmap_blocks;
1989062eacf5SUladzislau Rezki (Sony) }
1990db64fe02SNick Piggin 
1991db64fe02SNick Piggin /*
1992db64fe02SNick Piggin  * We should probably have a fallback mechanism to allocate virtual memory
1993db64fe02SNick Piggin  * out of partially filled vmap blocks. However, vmap block sizing should be
1994db64fe02SNick Piggin  * fairly reasonable according to the vmalloc size, so it shouldn't be a
1995db64fe02SNick Piggin  * big problem.
1996db64fe02SNick Piggin  */
1997db64fe02SNick Piggin 
1998db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr)
1999db64fe02SNick Piggin {
2000db64fe02SNick Piggin 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2001db64fe02SNick Piggin 	addr /= VMAP_BLOCK_SIZE;
2002db64fe02SNick Piggin 	return addr;
2003db64fe02SNick Piggin }
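/*
 * Worked example (editor's addition, not part of the original file).
 * Assume, purely for illustration, that VMAP_BLOCK_SIZE is 4MB and
 * that VMALLOC_START is VMAP_BLOCK_SIZE-aligned; the real values
 * depend on the kernel configuration.
 *
 *	addr = VMALLOC_START + 9MB;
 *
 *	addr_to_vb_idx(addr) == 9MB / 4MB == 2, i.e. the third block
 *	relative to VMALLOC_START;
 *
 *	addr_to_vb_xa(addr) == &per_cpu(vmap_block_queue,
 *		(addr / 4MB) % num_possible_cpus()).vmap_blocks;
 *
 * Any two addresses within the same VMAP_BLOCK_SIZE-aligned block
 * therefore yield the same block index and hash to the same per-cpu
 * xarray.
 */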
2004db64fe02SNick Piggin 
2005cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2006cf725ce2SRoman Pen {
2007cf725ce2SRoman Pen 	unsigned long addr;
2008cf725ce2SRoman Pen 
2009cf725ce2SRoman Pen 	addr = va_start + (pages_off << PAGE_SHIFT);
2010cf725ce2SRoman Pen 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2011cf725ce2SRoman Pen 	return (void *)addr;
2012cf725ce2SRoman Pen }
2013cf725ce2SRoman Pen 
2014cf725ce2SRoman Pen /**
2015cf725ce2SRoman Pen  * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in this
2016cf725ce2SRoman Pen  *                  block. The page count, of course, can't exceed VMAP_BBMAP_BITS
2017cf725ce2SRoman Pen  * @order:    occupy 2^order pages of the newly allocated block
2018cf725ce2SRoman Pen  * @gfp_mask: flags for the page level allocator
2019cf725ce2SRoman Pen  *
2020a862f68aSMike Rapoport  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2021cf725ce2SRoman Pen  */
2022cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2023db64fe02SNick Piggin {
2024db64fe02SNick Piggin 	struct vmap_block_queue *vbq;
2025db64fe02SNick Piggin 	struct vmap_block *vb;
2026db64fe02SNick Piggin 	struct vmap_area *va;
2027062eacf5SUladzislau Rezki (Sony) 	struct xarray *xa;
2028db64fe02SNick Piggin 	unsigned long vb_idx;
2029db64fe02SNick Piggin 	int node, err;
2030cf725ce2SRoman Pen 	void *vaddr;
2031db64fe02SNick Piggin 
2032db64fe02SNick Piggin 	node = numa_node_id();
2033db64fe02SNick Piggin 
2034db64fe02SNick Piggin 	vb = kmalloc_node(sizeof(struct vmap_block),
2035db64fe02SNick Piggin 			gfp_mask & GFP_RECLAIM_MASK, node);
2036db64fe02SNick Piggin 	if (unlikely(!vb))
2037db64fe02SNick Piggin 		return ERR_PTR(-ENOMEM);
2038db64fe02SNick Piggin 
2039db64fe02SNick Piggin 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2040db64fe02SNick Piggin 					VMALLOC_START, VMALLOC_END,
2041869176a0SBaoquan He 					node, gfp_mask,
2042869176a0SBaoquan He 					VMAP_RAM|VMAP_BLOCK);
2043ddf9c6d4STobias Klauser 	if (IS_ERR(va)) {
2044db64fe02SNick Piggin 		kfree(vb);
2045e7d86340SJulia Lawall 		return ERR_CAST(va);
2046db64fe02SNick Piggin 	}
2047db64fe02SNick Piggin 
2048cf725ce2SRoman Pen 	vaddr = vmap_block_vaddr(va->va_start, 0);
2049db64fe02SNick Piggin 	spin_lock_init(&vb->lock);
2050db64fe02SNick Piggin 	vb->va = va;
2051cf725ce2SRoman Pen 	/* At least something should be left free */
2052cf725ce2SRoman Pen 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2053d76f9954SBaoquan He 	bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2054cf725ce2SRoman Pen 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
2055db64fe02SNick Piggin 	vb->dirty = 0;
20567d61bfe8SRoman Pen 	vb->dirty_min = VMAP_BBMAP_BITS;
20577d61bfe8SRoman Pen 	vb->dirty_max = 0;
2058d76f9954SBaoquan He 	bitmap_set(vb->used_map, 0, (1UL << order));
2059db64fe02SNick Piggin 	INIT_LIST_HEAD(&vb->free_list);
2060db64fe02SNick Piggin 
2061fa1c77c1SUladzislau Rezki (Sony) 	xa = addr_to_vb_xa(va->va_start);
2062db64fe02SNick Piggin 	vb_idx = addr_to_vb_idx(va->va_start);
2063062eacf5SUladzislau Rezki (Sony) 	err = xa_insert(xa, vb_idx, vb, gfp_mask);
20640f14599cSMatthew Wilcox (Oracle) 	if (err) {
20650f14599cSMatthew Wilcox (Oracle) 		kfree(vb);
20660f14599cSMatthew Wilcox (Oracle) 		free_vmap_area(va);
20670f14599cSMatthew Wilcox (Oracle) 		return ERR_PTR(err);
20680f14599cSMatthew Wilcox (Oracle) 	}
2069db64fe02SNick Piggin 
20703f804920SSebastian Andrzej Siewior 	vbq = raw_cpu_ptr(&vmap_block_queue);
2071db64fe02SNick Piggin 	spin_lock(&vbq->lock);
207268ac546fSRoman Pen 	list_add_tail_rcu(&vb->free_list, &vbq->free);
2073db64fe02SNick Piggin 	spin_unlock(&vbq->lock);
2074db64fe02SNick Piggin 
2075cf725ce2SRoman Pen 	return vaddr;
2076db64fe02SNick Piggin }
2077db64fe02SNick Piggin 
2078db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb)
2079db64fe02SNick Piggin {
2080db64fe02SNick Piggin 	struct vmap_block *tmp;
2081062eacf5SUladzislau Rezki (Sony) 	struct xarray *xa;
2082db64fe02SNick Piggin 
2083fa1c77c1SUladzislau Rezki (Sony) 	xa = addr_to_vb_xa(vb->va->va_start);
2084062eacf5SUladzislau Rezki (Sony) 	tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2085db64fe02SNick Piggin 	BUG_ON(tmp != vb);
2086db64fe02SNick Piggin 
2087edd89818SUladzislau Rezki (Sony) 	spin_lock(&vmap_area_lock);
2088edd89818SUladzislau Rezki (Sony) 	unlink_va(vb->va, &vmap_area_root);
2089edd89818SUladzislau Rezki (Sony) 	spin_unlock(&vmap_area_lock);
2090edd89818SUladzislau Rezki (Sony) 
209164141da5SJeremy Fitzhardinge 	free_vmap_area_noflush(vb->va);
209222a3c7d1SLai Jiangshan 	kfree_rcu(vb, rcu_head);
2093db64fe02SNick Piggin }
2094db64fe02SNick Piggin 
2095ca5e46c3SThomas Gleixner static bool purge_fragmented_block(struct vmap_block *vb,
209677e50af0SThomas Gleixner 		struct vmap_block_queue *vbq, struct list_head *purge_list,
209777e50af0SThomas Gleixner 		bool force_purge)
209802b709dfSNick Piggin {
2099ca5e46c3SThomas Gleixner 	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2100ca5e46c3SThomas Gleixner 	    vb->dirty == VMAP_BBMAP_BITS)
2101ca5e46c3SThomas Gleixner 		return false;
210202b709dfSNick Piggin 
210377e50af0SThomas Gleixner 	/* Don't overeagerly purge usable blocks unless requested */
210477e50af0SThomas Gleixner 	if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
210577e50af0SThomas Gleixner 		return false;
210677e50af0SThomas Gleixner 
2107ca5e46c3SThomas Gleixner 	/* prevent further allocs after releasing lock */
21087f48121eSThomas Gleixner 	WRITE_ONCE(vb->free, 0);
2109ca5e46c3SThomas Gleixner 	/* prevent purging it again */
21107f48121eSThomas Gleixner 	WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
21117d61bfe8SRoman Pen 	vb->dirty_min = 0;
21127d61bfe8SRoman Pen 	vb->dirty_max = VMAP_BBMAP_BITS;
211302b709dfSNick Piggin 	spin_lock(&vbq->lock);
211402b709dfSNick Piggin 	list_del_rcu(&vb->free_list);
211502b709dfSNick Piggin 	spin_unlock(&vbq->lock);
2116ca5e46c3SThomas Gleixner 	list_add_tail(&vb->purge, purge_list);
2117ca5e46c3SThomas Gleixner 	return true;
211802b709dfSNick Piggin }
211902b709dfSNick Piggin 
2120ca5e46c3SThomas Gleixner static void free_purged_blocks(struct list_head *purge_list)
2121ca5e46c3SThomas Gleixner {
2122ca5e46c3SThomas Gleixner 	struct vmap_block *vb, *n_vb;
2123ca5e46c3SThomas Gleixner 
2124ca5e46c3SThomas Gleixner 	list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
212502b709dfSNick Piggin 		list_del(&vb->purge);
212602b709dfSNick Piggin 		free_vmap_block(vb);
212702b709dfSNick Piggin 	}
212802b709dfSNick Piggin }
212902b709dfSNick Piggin 
2130ca5e46c3SThomas Gleixner static void purge_fragmented_blocks(int cpu)
2131ca5e46c3SThomas Gleixner {
2132ca5e46c3SThomas Gleixner 	LIST_HEAD(purge);
2133ca5e46c3SThomas Gleixner 	struct vmap_block *vb;
2134ca5e46c3SThomas Gleixner 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2135ca5e46c3SThomas Gleixner 
2136ca5e46c3SThomas Gleixner 	rcu_read_lock();
2137ca5e46c3SThomas Gleixner 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
21387f48121eSThomas Gleixner 		unsigned long free = READ_ONCE(vb->free);
21397f48121eSThomas Gleixner 		unsigned long dirty = READ_ONCE(vb->dirty);
21407f48121eSThomas Gleixner 
21417f48121eSThomas Gleixner 		if (free + dirty != VMAP_BBMAP_BITS ||
21427f48121eSThomas Gleixner 		    dirty == VMAP_BBMAP_BITS)
2143ca5e46c3SThomas Gleixner 			continue;
2144ca5e46c3SThomas Gleixner 
2145ca5e46c3SThomas Gleixner 		spin_lock(&vb->lock);
214677e50af0SThomas Gleixner 		purge_fragmented_block(vb, vbq, &purge, true);
2147ca5e46c3SThomas Gleixner 		spin_unlock(&vb->lock);
2148ca5e46c3SThomas Gleixner 	}
2149ca5e46c3SThomas Gleixner 	rcu_read_unlock();
2150ca5e46c3SThomas Gleixner 	free_purged_blocks(&purge);
2151ca5e46c3SThomas Gleixner }
2152ca5e46c3SThomas Gleixner 
215302b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void)
215402b709dfSNick Piggin {
215502b709dfSNick Piggin 	int cpu;
215602b709dfSNick Piggin 
215702b709dfSNick Piggin 	for_each_possible_cpu(cpu)
215802b709dfSNick Piggin 		purge_fragmented_blocks(cpu);
215902b709dfSNick Piggin }
216002b709dfSNick Piggin 
2161db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2162db64fe02SNick Piggin {
2163db64fe02SNick Piggin 	struct vmap_block_queue *vbq;
2164db64fe02SNick Piggin 	struct vmap_block *vb;
2165cf725ce2SRoman Pen 	void *vaddr = NULL;
2166db64fe02SNick Piggin 	unsigned int order;
2167db64fe02SNick Piggin 
2168891c49abSAlexander Kuleshov 	BUG_ON(offset_in_page(size));
2169db64fe02SNick Piggin 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2170aa91c4d8SJan Kara 	if (WARN_ON(size == 0)) {
2171aa91c4d8SJan Kara 		/*
2172aa91c4d8SJan Kara 		 * Allocating 0 bytes isn't what the caller wants, since
2173aa91c4d8SJan Kara 		 * get_order(0) returns a funny result. Just warn and
2174aa91c4d8SJan Kara 		 * terminate early.
2175aa91c4d8SJan Kara 		 */
2176aa91c4d8SJan Kara 		return NULL;
2177aa91c4d8SJan Kara 	}
2178db64fe02SNick Piggin 	order = get_order(size);
2179db64fe02SNick Piggin 
2180db64fe02SNick Piggin 	rcu_read_lock();
21813f804920SSebastian Andrzej Siewior 	vbq = raw_cpu_ptr(&vmap_block_queue);
2182db64fe02SNick Piggin 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2183cf725ce2SRoman Pen 		unsigned long pages_off;
2184db64fe02SNick Piggin 
218543d76502SThomas Gleixner 		if (READ_ONCE(vb->free) < (1UL << order))
218643d76502SThomas Gleixner 			continue;
218743d76502SThomas Gleixner 
2188db64fe02SNick Piggin 		spin_lock(&vb->lock);
2189cf725ce2SRoman Pen 		if (vb->free < (1UL << order)) {
2190cf725ce2SRoman Pen 			spin_unlock(&vb->lock);
2191cf725ce2SRoman Pen 			continue;
2192cf725ce2SRoman Pen 		}
219302b709dfSNick Piggin 
2194cf725ce2SRoman Pen 		pages_off = VMAP_BBMAP_BITS - vb->free;
2195cf725ce2SRoman Pen 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
219643d76502SThomas Gleixner 		WRITE_ONCE(vb->free, vb->free - (1UL << order));
2197d76f9954SBaoquan He 		bitmap_set(vb->used_map, pages_off, (1UL << order));
2198db64fe02SNick Piggin 		if (vb->free == 0) {
2199db64fe02SNick Piggin 			spin_lock(&vbq->lock);
2200de560423SNick Piggin 			list_del_rcu(&vb->free_list);
2201db64fe02SNick Piggin 			spin_unlock(&vbq->lock);
2202db64fe02SNick Piggin 		}
2203cf725ce2SRoman Pen 
2204db64fe02SNick Piggin 		spin_unlock(&vb->lock);
2205db64fe02SNick Piggin 		break;
2206db64fe02SNick Piggin 	}
220702b709dfSNick Piggin 
2208db64fe02SNick Piggin 	rcu_read_unlock();
2209db64fe02SNick Piggin 
2210cf725ce2SRoman Pen 	/* Allocate new block if nothing was found */
2211cf725ce2SRoman Pen 	if (!vaddr)
2212cf725ce2SRoman Pen 		vaddr = new_vmap_block(order, gfp_mask);
2213db64fe02SNick Piggin 
2214cf725ce2SRoman Pen 	return vaddr;
2215db64fe02SNick Piggin }
2216db64fe02SNick Piggin 
221778a0e8c4SChristoph Hellwig static void vb_free(unsigned long addr, unsigned long size)
2218db64fe02SNick Piggin {
2219db64fe02SNick Piggin 	unsigned long offset;
2220db64fe02SNick Piggin 	unsigned int order;
2221db64fe02SNick Piggin 	struct vmap_block *vb;
2222062eacf5SUladzislau Rezki (Sony) 	struct xarray *xa;
2223db64fe02SNick Piggin 
2224891c49abSAlexander Kuleshov 	BUG_ON(offset_in_page(size));
2225db64fe02SNick Piggin 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2226b29acbdcSNick Piggin 
222778a0e8c4SChristoph Hellwig 	flush_cache_vunmap(addr, addr + size);
2228b29acbdcSNick Piggin 
2229db64fe02SNick Piggin 	order = get_order(size);
223078a0e8c4SChristoph Hellwig 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2231062eacf5SUladzislau Rezki (Sony) 
2232fa1c77c1SUladzislau Rezki (Sony) 	xa = addr_to_vb_xa(addr);
2233062eacf5SUladzislau Rezki (Sony) 	vb = xa_load(xa, addr_to_vb_idx(addr));
2234062eacf5SUladzislau Rezki (Sony) 
2235d76f9954SBaoquan He 	spin_lock(&vb->lock);
2236d76f9954SBaoquan He 	bitmap_clear(vb->used_map, offset, (1UL << order));
2237d76f9954SBaoquan He 	spin_unlock(&vb->lock);
2238db64fe02SNick Piggin 
22394ad0ae8cSNicholas Piggin 	vunmap_range_noflush(addr, addr + size);
224064141da5SJeremy Fitzhardinge 
22418e57f8acSVlastimil Babka 	if (debug_pagealloc_enabled_static())
224278a0e8c4SChristoph Hellwig 		flush_tlb_kernel_range(addr, addr + size);
224382a2e924SChintan Pandya 
2244db64fe02SNick Piggin 	spin_lock(&vb->lock);
22457d61bfe8SRoman Pen 
2246a09fad96SThomas Gleixner 	/* Expand the not-yet-TLB-flushed dirty range */
22477d61bfe8SRoman Pen 	vb->dirty_min = min(vb->dirty_min, offset);
22487d61bfe8SRoman Pen 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2249d086817dSMinChan Kim 
22507f48121eSThomas Gleixner 	WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2251db64fe02SNick Piggin 	if (vb->dirty == VMAP_BBMAP_BITS) {
2252de560423SNick Piggin 		BUG_ON(vb->free);
2253db64fe02SNick Piggin 		spin_unlock(&vb->lock);
2254db64fe02SNick Piggin 		free_vmap_block(vb);
2255db64fe02SNick Piggin 	} else
2256db64fe02SNick Piggin 		spin_unlock(&vb->lock);
2257db64fe02SNick Piggin }
2258db64fe02SNick Piggin 
2259868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2260db64fe02SNick Piggin {
2261ca5e46c3SThomas Gleixner 	LIST_HEAD(purge_list);
2262db64fe02SNick Piggin 	int cpu;
2263db64fe02SNick Piggin 
22649b463334SJeremy Fitzhardinge 	if (unlikely(!vmap_initialized))
22659b463334SJeremy Fitzhardinge 		return;
22669b463334SJeremy Fitzhardinge 
2267ca5e46c3SThomas Gleixner 	mutex_lock(&vmap_purge_lock);
22685803ed29SChristoph Hellwig 
2269db64fe02SNick Piggin 	for_each_possible_cpu(cpu) {
2270db64fe02SNick Piggin 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2271db64fe02SNick Piggin 		struct vmap_block *vb;
2272fc1e0d98SThomas Gleixner 		unsigned long idx;
2273db64fe02SNick Piggin 
2274db64fe02SNick Piggin 		rcu_read_lock();
2275fc1e0d98SThomas Gleixner 		xa_for_each(&vbq->vmap_blocks, idx, vb) {
2276db64fe02SNick Piggin 			spin_lock(&vb->lock);
2277ca5e46c3SThomas Gleixner 
2278ca5e46c3SThomas Gleixner 			/*
2279ca5e46c3SThomas Gleixner 			 * Try to purge a fragmented block first. If it's
2280ca5e46c3SThomas Gleixner 			 * not purgeable, check whether there is dirty
2281ca5e46c3SThomas Gleixner 			 * space to be flushed.
2282ca5e46c3SThomas Gleixner 			 */
228377e50af0SThomas Gleixner 			if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
2284a09fad96SThomas Gleixner 			    vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
22857d61bfe8SRoman Pen 				unsigned long va_start = vb->va->va_start;
2286db64fe02SNick Piggin 				unsigned long s, e;
2287b136be5eSJoonsoo Kim 
22887d61bfe8SRoman Pen 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
22897d61bfe8SRoman Pen 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2290db64fe02SNick Piggin 
22917d61bfe8SRoman Pen 				start = min(s, start);
22927d61bfe8SRoman Pen 				end   = max(e, end);
22937d61bfe8SRoman Pen 
2294a09fad96SThomas Gleixner 				/* Prevent that this is flushed again */
2295a09fad96SThomas Gleixner 				vb->dirty_min = VMAP_BBMAP_BITS;
2296a09fad96SThomas Gleixner 				vb->dirty_max = 0;
2297a09fad96SThomas Gleixner 
2298db64fe02SNick Piggin 				flush = 1;
2299db64fe02SNick Piggin 			}
2300db64fe02SNick Piggin 			spin_unlock(&vb->lock);
2301db64fe02SNick Piggin 		}
2302db64fe02SNick Piggin 		rcu_read_unlock();
2303db64fe02SNick Piggin 	}
2304ca5e46c3SThomas Gleixner 	free_purged_blocks(&purge_list);
2305db64fe02SNick Piggin 
23060574ecd1SChristoph Hellwig 	if (!__purge_vmap_area_lazy(start, end) && flush)
23070574ecd1SChristoph Hellwig 		flush_tlb_kernel_range(start, end);
2308f9e09977SChristoph Hellwig 	mutex_unlock(&vmap_purge_lock);
2309db64fe02SNick Piggin }
2310868b104dSRick Edgecombe 
2311868b104dSRick Edgecombe /**
2312868b104dSRick Edgecombe  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2313868b104dSRick Edgecombe  *
2314868b104dSRick Edgecombe  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2315868b104dSRick Edgecombe  * to amortize TLB flushing overheads. What this means is that any page you
2316868b104dSRick Edgecombe  * have now may, in a former life, have been mapped into a kernel virtual
2317868b104dSRick Edgecombe  * address by the vmap layer, so there might be some CPUs with TLB entries
2318868b104dSRick Edgecombe  * still referencing that page (in addition to the regular 1:1 kernel mapping).
2319868b104dSRick Edgecombe  *
2320868b104dSRick Edgecombe  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2321868b104dSRick Edgecombe  * be sure that none of the pages we have control over will have any aliases
2322868b104dSRick Edgecombe  * from the vmap layer.
2323868b104dSRick Edgecombe  */
2324868b104dSRick Edgecombe void vm_unmap_aliases(void)
2325868b104dSRick Edgecombe {
2326868b104dSRick Edgecombe 	unsigned long start = ULONG_MAX, end = 0;
2327868b104dSRick Edgecombe 	int flush = 0;
2328868b104dSRick Edgecombe 
2329868b104dSRick Edgecombe 	_vm_unmap_aliases(start, end, flush);
2330868b104dSRick Edgecombe }
2331db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases);
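/*
 * Usage sketch (editor's addition, not part of the original file):
 * a subsystem that is about to hand pages back to a hypervisor, or
 * that otherwise depends on no kernel aliases of its pages remaining
 * mapped, might do something like:
 *
 *	vm_unmap_aliases();
 *	give_pages_to_hypervisor(pages, nr);	// hypothetical helper
 *
 * After vm_unmap_aliases() returns, no lazily kept vmap aliases of
 * any page remain visible through stale TLB entries.
 */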
2332db64fe02SNick Piggin 
2333db64fe02SNick Piggin /**
2334db64fe02SNick Piggin  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2335db64fe02SNick Piggin  * @mem: the pointer returned by vm_map_ram
2336db64fe02SNick Piggin  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2337db64fe02SNick Piggin  */
2338db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count)
2339db64fe02SNick Piggin {
234065ee03c4SGuillermo Julián Moreno 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
23414aff1dc4SAndrey Konovalov 	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
23429c3acf60SChristoph Hellwig 	struct vmap_area *va;
2343db64fe02SNick Piggin 
23445803ed29SChristoph Hellwig 	might_sleep();
2345db64fe02SNick Piggin 	BUG_ON(!addr);
2346db64fe02SNick Piggin 	BUG_ON(addr < VMALLOC_START);
2347db64fe02SNick Piggin 	BUG_ON(addr > VMALLOC_END);
2348a1c0b1a0SShawn Lin 	BUG_ON(!PAGE_ALIGNED(addr));
2349db64fe02SNick Piggin 
2350d98c9e83SAndrey Ryabinin 	kasan_poison_vmalloc(mem, size);
2351d98c9e83SAndrey Ryabinin 
23529c3acf60SChristoph Hellwig 	if (likely(count <= VMAP_MAX_ALLOC)) {
235305e3ff95SChintan Pandya 		debug_check_no_locks_freed(mem, size);
235478a0e8c4SChristoph Hellwig 		vb_free(addr, size);
23559c3acf60SChristoph Hellwig 		return;
23569c3acf60SChristoph Hellwig 	}
23579c3acf60SChristoph Hellwig 
2358edd89818SUladzislau Rezki (Sony) 	va = find_unlink_vmap_area(addr);
235914687619SUladzislau Rezki (Sony) 	if (WARN_ON_ONCE(!va))
236014687619SUladzislau Rezki (Sony) 		return;
236114687619SUladzislau Rezki (Sony) 
236205e3ff95SChintan Pandya 	debug_check_no_locks_freed((void *)va->va_start,
236305e3ff95SChintan Pandya 				    (va->va_end - va->va_start));
23649c3acf60SChristoph Hellwig 	free_unmap_vmap_area(va);
2365db64fe02SNick Piggin }
2366db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram);
2367db64fe02SNick Piggin 
2368db64fe02SNick Piggin /**
2369db64fe02SNick Piggin  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2370db64fe02SNick Piggin  * @pages: an array of pointers to the pages to be mapped
2371db64fe02SNick Piggin  * @count: number of pages
2372db64fe02SNick Piggin  * @node: prefer to allocate data structures on this node
2373e99c97adSRandy Dunlap  *
237436437638SGioh Kim  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
237536437638SGioh Kim  * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
237636437638SGioh Kim  * faster than vmap, which is good.  But if you mix long-lived and short-lived
237736437638SGioh Kim  * objects with vm_map_ram(), it can consume lots of address space through
237836437638SGioh Kim  * fragmentation (especially on a 32-bit machine), and you may eventually see
237936437638SGioh Kim  * mapping failures.  Please use this function for short-lived objects only.
2380e99c97adSRandy Dunlap  * Returns: a pointer to the address that has been mapped, or %NULL on failure
2381db64fe02SNick Piggin  */
2382d4efd79aSChristoph Hellwig void *vm_map_ram(struct page **pages, unsigned int count, int node)
2383db64fe02SNick Piggin {
238465ee03c4SGuillermo Julián Moreno 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2385db64fe02SNick Piggin 	unsigned long addr;
2386db64fe02SNick Piggin 	void *mem;
2387db64fe02SNick Piggin 
2388db64fe02SNick Piggin 	if (likely(count <= VMAP_MAX_ALLOC)) {
2389db64fe02SNick Piggin 		mem = vb_alloc(size, GFP_KERNEL);
2390db64fe02SNick Piggin 		if (IS_ERR(mem))
2391db64fe02SNick Piggin 			return NULL;
2392db64fe02SNick Piggin 		addr = (unsigned long)mem;
2393db64fe02SNick Piggin 	} else {
2394db64fe02SNick Piggin 		struct vmap_area *va;
2395db64fe02SNick Piggin 		va = alloc_vmap_area(size, PAGE_SIZE,
2396869176a0SBaoquan He 				VMALLOC_START, VMALLOC_END,
2397869176a0SBaoquan He 				node, GFP_KERNEL, VMAP_RAM);
2398db64fe02SNick Piggin 		if (IS_ERR(va))
2399db64fe02SNick Piggin 			return NULL;
2400db64fe02SNick Piggin 
2401db64fe02SNick Piggin 		addr = va->va_start;
2402db64fe02SNick Piggin 		mem = (void *)addr;
2403db64fe02SNick Piggin 	}
2404d98c9e83SAndrey Ryabinin 
2405b67177ecSNicholas Piggin 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2406b67177ecSNicholas Piggin 				pages, PAGE_SHIFT) < 0) {
2407db64fe02SNick Piggin 		vm_unmap_ram(mem, count);
2408db64fe02SNick Piggin 		return NULL;
2409db64fe02SNick Piggin 	}
2410b67177ecSNicholas Piggin 
241123689e91SAndrey Konovalov 	/*
241223689e91SAndrey Konovalov 	 * Mark the pages as accessible, now that they are mapped.
241323689e91SAndrey Konovalov 	 * With hardware tag-based KASAN, marking is skipped for
241423689e91SAndrey Konovalov 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
241523689e91SAndrey Konovalov 	 */
2416f6e39794SAndrey Konovalov 	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
241719f1c3acSAndrey Konovalov 
2418db64fe02SNick Piggin 	return mem;
2419db64fe02SNick Piggin }
2420db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram);
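/*
 * Usage sketch (editor's addition, not part of the original file):
 * a minimal, hypothetical user of the vm_map_ram()/vm_unmap_ram()
 * pair. With count <= VMAP_MAX_ALLOC this exercises the per-cpu
 * vmap block fast path implemented above. Assumes <linux/mm.h>,
 * <linux/gfp.h> and <linux/vmalloc.h>.
 */
static int touch_two_pages(void)
{
	struct page *pages[2] = { NULL, NULL };
	void *mem;
	int ret = -ENOMEM;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto out;

	/* Map both pages at a virtually contiguous address. */
	mem = vm_map_ram(pages, 2, NUMA_NO_NODE);
	if (!mem)
		goto out;

	memset(mem, 0, 2 * PAGE_SIZE);

	/* Unmap with the same count that was passed to vm_map_ram(). */
	vm_unmap_ram(mem, 2);
	ret = 0;
out:
	if (pages[1])
		__free_page(pages[1]);
	if (pages[0])
		__free_page(pages[0]);
	return ret;
}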
2421db64fe02SNick Piggin 
24224341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata;
242392eac168SMike Rapoport 
2424121e6f32SNicholas Piggin static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2425121e6f32SNicholas Piggin {
2426121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2427121e6f32SNicholas Piggin 	return vm->page_order;
2428121e6f32SNicholas Piggin #else
2429121e6f32SNicholas Piggin 	return 0;
2430121e6f32SNicholas Piggin #endif
2431121e6f32SNicholas Piggin }
2432121e6f32SNicholas Piggin 
2433121e6f32SNicholas Piggin static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2434121e6f32SNicholas Piggin {
2435121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2436121e6f32SNicholas Piggin 	vm->page_order = order;
2437121e6f32SNicholas Piggin #else
2438121e6f32SNicholas Piggin 	BUG_ON(order != 0);
2439121e6f32SNicholas Piggin #endif
2440121e6f32SNicholas Piggin }
2441121e6f32SNicholas Piggin 
2442f0aa6617STejun Heo /**
2443be9b7335SNicolas Pitre  * vm_area_add_early - add vmap area early during boot
2444be9b7335SNicolas Pitre  * @vm: vm_struct to add
2445be9b7335SNicolas Pitre  *
2446be9b7335SNicolas Pitre  * This function is used to add a fixed kernel vm area to vmlist before
2447be9b7335SNicolas Pitre  * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
2448be9b7335SNicolas Pitre  * should contain proper values and the other fields should be zero.
2449be9b7335SNicolas Pitre  *
2450be9b7335SNicolas Pitre  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2451be9b7335SNicolas Pitre  */
2452be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm)
2453be9b7335SNicolas Pitre {
2454be9b7335SNicolas Pitre 	struct vm_struct *tmp, **p;
2455be9b7335SNicolas Pitre 
2456be9b7335SNicolas Pitre 	BUG_ON(vmap_initialized);
2457be9b7335SNicolas Pitre 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2458be9b7335SNicolas Pitre 		if (tmp->addr >= vm->addr) {
2459be9b7335SNicolas Pitre 			BUG_ON(tmp->addr < vm->addr + vm->size);
2460be9b7335SNicolas Pitre 			break;
2461be9b7335SNicolas Pitre 		} else
2462be9b7335SNicolas Pitre 			BUG_ON(tmp->addr + tmp->size > vm->addr);
2463be9b7335SNicolas Pitre 	}
2464be9b7335SNicolas Pitre 	vm->next = *p;
2465be9b7335SNicolas Pitre 	*p = vm;
2466be9b7335SNicolas Pitre }
2467be9b7335SNicolas Pitre 
2468be9b7335SNicolas Pitre /**
2469f0aa6617STejun Heo  * vm_area_register_early - register vmap area early during boot
2470f0aa6617STejun Heo  * @vm: vm_struct to register
2471c0c0a293STejun Heo  * @align: requested alignment
2472f0aa6617STejun Heo  *
2473f0aa6617STejun Heo  * This function is used to register a kernel vm area before
2474f0aa6617STejun Heo  * vmalloc_init() is called.  @vm->size and @vm->flags should contain
2475f0aa6617STejun Heo  * proper values on entry and other fields should be zero.  On return,
2476f0aa6617STejun Heo  * vm->addr contains the allocated address.
2477f0aa6617STejun Heo  *
2478f0aa6617STejun Heo  * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2479f0aa6617STejun Heo  */
2480c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2481f0aa6617STejun Heo {
24820eb68437SKefeng Wang 	unsigned long addr = ALIGN(VMALLOC_START, align);
24830eb68437SKefeng Wang 	struct vm_struct *cur, **p;
2484f0aa6617STejun Heo 
24850eb68437SKefeng Wang 	BUG_ON(vmap_initialized);
2486c0c0a293STejun Heo 
24870eb68437SKefeng Wang 	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
24880eb68437SKefeng Wang 		if ((unsigned long)cur->addr - addr >= vm->size)
24890eb68437SKefeng Wang 			break;
24900eb68437SKefeng Wang 		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
24910eb68437SKefeng Wang 	}
24920eb68437SKefeng Wang 
24930eb68437SKefeng Wang 	BUG_ON(addr > VMALLOC_END - vm->size);
2494c0c0a293STejun Heo 	vm->addr = (void *)addr;
24950eb68437SKefeng Wang 	vm->next = *p;
24960eb68437SKefeng Wang 	*p = vm;
24973252b1d8SKefeng Wang 	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
2498f0aa6617STejun Heo }
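/*
 * Boot-time usage sketch (editor's addition, not part of the original
 * file), modelled on how first-chunk percpu setup uses this API: the
 * vm_struct must have static storage duration, since vmlist links it
 * directly. "early_vm", "reserve_early_area" and the 2MB size are
 * hypothetical.
 */
static struct vm_struct early_vm;

static void __init reserve_early_area(void)
{
	early_vm.flags = VM_ALLOC;
	early_vm.size = SZ_2M;

	/* Picks a free address; on return early_vm.addr is valid. */
	vm_area_register_early(&early_vm, PAGE_SIZE);
}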
2499f0aa6617STejun Heo 
250068ad4a33SUladzislau Rezki (Sony) static void vmap_init_free_space(void)
250168ad4a33SUladzislau Rezki (Sony) {
250268ad4a33SUladzislau Rezki (Sony) 	unsigned long vmap_start = 1;
250368ad4a33SUladzislau Rezki (Sony) 	const unsigned long vmap_end = ULONG_MAX;
250468ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *busy, *free;
250568ad4a33SUladzislau Rezki (Sony) 
250668ad4a33SUladzislau Rezki (Sony) 	/*
250768ad4a33SUladzislau Rezki (Sony) 	 *     B     F     B     B     B     F
250868ad4a33SUladzislau Rezki (Sony) 	 * -|-----|.....|-----|-----|-----|.....|-
250968ad4a33SUladzislau Rezki (Sony) 	 *  |           The KVA space           |
251068ad4a33SUladzislau Rezki (Sony) 	 *  |<--------------------------------->|
251168ad4a33SUladzislau Rezki (Sony) 	 */
251268ad4a33SUladzislau Rezki (Sony) 	list_for_each_entry(busy, &vmap_area_list, list) {
251368ad4a33SUladzislau Rezki (Sony) 		if (busy->va_start - vmap_start > 0) {
251468ad4a33SUladzislau Rezki (Sony) 			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
251568ad4a33SUladzislau Rezki (Sony) 			if (!WARN_ON_ONCE(!free)) {
251668ad4a33SUladzislau Rezki (Sony) 				free->va_start = vmap_start;
251768ad4a33SUladzislau Rezki (Sony) 				free->va_end = busy->va_start;
251868ad4a33SUladzislau Rezki (Sony) 
251968ad4a33SUladzislau Rezki (Sony) 				insert_vmap_area_augment(free, NULL,
252068ad4a33SUladzislau Rezki (Sony) 					&free_vmap_area_root,
252168ad4a33SUladzislau Rezki (Sony) 						&free_vmap_area_list);
252268ad4a33SUladzislau Rezki (Sony) 			}
252368ad4a33SUladzislau Rezki (Sony) 		}
252468ad4a33SUladzislau Rezki (Sony) 
252568ad4a33SUladzislau Rezki (Sony) 		vmap_start = busy->va_end;
252668ad4a33SUladzislau Rezki (Sony) 	}
252768ad4a33SUladzislau Rezki (Sony) 
252868ad4a33SUladzislau Rezki (Sony) 	if (vmap_end - vmap_start > 0) {
252968ad4a33SUladzislau Rezki (Sony) 		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
253068ad4a33SUladzislau Rezki (Sony) 		if (!WARN_ON_ONCE(!free)) {
253168ad4a33SUladzislau Rezki (Sony) 			free->va_start = vmap_start;
253268ad4a33SUladzislau Rezki (Sony) 			free->va_end = vmap_end;
253368ad4a33SUladzislau Rezki (Sony) 
253468ad4a33SUladzislau Rezki (Sony) 			insert_vmap_area_augment(free, NULL,
253568ad4a33SUladzislau Rezki (Sony) 				&free_vmap_area_root,
253668ad4a33SUladzislau Rezki (Sony) 					&free_vmap_area_list);
253768ad4a33SUladzislau Rezki (Sony) 		}
253868ad4a33SUladzislau Rezki (Sony) 	}
253968ad4a33SUladzislau Rezki (Sony) }
254068ad4a33SUladzislau Rezki (Sony) 
2541e36176beSUladzislau Rezki (Sony) static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2542e36176beSUladzislau Rezki (Sony) 	struct vmap_area *va, unsigned long flags, const void *caller)
2543cf88c790STejun Heo {
2544cf88c790STejun Heo 	vm->flags = flags;
2545cf88c790STejun Heo 	vm->addr = (void *)va->va_start;
2546cf88c790STejun Heo 	vm->size = va->va_end - va->va_start;
2547cf88c790STejun Heo 	vm->caller = caller;
2548db1aecafSMinchan Kim 	va->vm = vm;
2549e36176beSUladzislau Rezki (Sony) }
2550e36176beSUladzislau Rezki (Sony) 
2551e36176beSUladzislau Rezki (Sony) static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2552e36176beSUladzislau Rezki (Sony) 			      unsigned long flags, const void *caller)
2553e36176beSUladzislau Rezki (Sony) {
2554e36176beSUladzislau Rezki (Sony) 	spin_lock(&vmap_area_lock);
2555e36176beSUladzislau Rezki (Sony) 	setup_vmalloc_vm_locked(vm, va, flags, caller);
2556c69480adSJoonsoo Kim 	spin_unlock(&vmap_area_lock);
2557f5252e00SMitsuo Hayasaka }
2558cf88c790STejun Heo 
255920fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2560f5252e00SMitsuo Hayasaka {
2561d4033afdSJoonsoo Kim 	/*
256220fc02b4SZhang Yanfei 	 * Before removing VM_UNINITIALIZED,
2563d4033afdSJoonsoo Kim 	 * we should make sure that vm has proper values.
2564d4033afdSJoonsoo Kim 	 * Pair with smp_rmb() in show_numa_info().
2565d4033afdSJoonsoo Kim 	 */
2566d4033afdSJoonsoo Kim 	smp_wmb();
256720fc02b4SZhang Yanfei 	vm->flags &= ~VM_UNINITIALIZED;
2568cf88c790STejun Heo }
2569cf88c790STejun Heo 
2570db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size,
25717ca3027bSDaniel Axtens 		unsigned long align, unsigned long shift, unsigned long flags,
25727ca3027bSDaniel Axtens 		unsigned long start, unsigned long end, int node,
25737ca3027bSDaniel Axtens 		gfp_t gfp_mask, const void *caller)
2574db64fe02SNick Piggin {
25750006526dSKautuk Consul 	struct vmap_area *va;
2576db64fe02SNick Piggin 	struct vm_struct *area;
2577d98c9e83SAndrey Ryabinin 	unsigned long requested_size = size;
25781da177e4SLinus Torvalds 
257952fd24caSGiridhar Pemmasani 	BUG_ON(in_interrupt());
25807ca3027bSDaniel Axtens 	size = ALIGN(size, 1ul << shift);
258131be8309SOGAWA Hirofumi 	if (unlikely(!size))
258231be8309SOGAWA Hirofumi 		return NULL;
25831da177e4SLinus Torvalds 
2584252e5c6eSzijun_hu 	if (flags & VM_IOREMAP)
2585252e5c6eSzijun_hu 		align = 1ul << clamp_t(int, get_count_order_long(size),
2586252e5c6eSzijun_hu 				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2587252e5c6eSzijun_hu 
2588cf88c790STejun Heo 	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
25891da177e4SLinus Torvalds 	if (unlikely(!area))
25901da177e4SLinus Torvalds 		return NULL;
25911da177e4SLinus Torvalds 
259271394fe5SAndrey Ryabinin 	if (!(flags & VM_NO_GUARD))
25931da177e4SLinus Torvalds 		size += PAGE_SIZE;
25941da177e4SLinus Torvalds 
2595869176a0SBaoquan He 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
2596db64fe02SNick Piggin 	if (IS_ERR(va)) {
2597db64fe02SNick Piggin 		kfree(area);
2598db64fe02SNick Piggin 		return NULL;
25991da177e4SLinus Torvalds 	}
26001da177e4SLinus Torvalds 
2601d98c9e83SAndrey Ryabinin 	setup_vmalloc_vm(area, va, flags, caller);
26023c5c3cfbSDaniel Axtens 
260319f1c3acSAndrey Konovalov 	/*
260419f1c3acSAndrey Konovalov 	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
260519f1c3acSAndrey Konovalov 	 * best-effort approach, as they can be mapped outside of vmalloc code.
260619f1c3acSAndrey Konovalov 	 * For VM_ALLOC mappings, the pages are marked as accessible after
260719f1c3acSAndrey Konovalov 	 * getting mapped in __vmalloc_node_range().
260823689e91SAndrey Konovalov 	 * With hardware tag-based KASAN, marking is skipped for
260923689e91SAndrey Konovalov 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
261019f1c3acSAndrey Konovalov 	 */
261119f1c3acSAndrey Konovalov 	if (!(flags & VM_ALLOC))
261223689e91SAndrey Konovalov 		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
2613f6e39794SAndrey Konovalov 						    KASAN_VMALLOC_PROT_NORMAL);
26141d96320fSAndrey Konovalov 
26151da177e4SLinus Torvalds 	return area;
26161da177e4SLinus Torvalds }
26171da177e4SLinus Torvalds 
2618c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2619c2968612SBenjamin Herrenschmidt 				       unsigned long start, unsigned long end,
26205e6cafc8SMarek Szyprowski 				       const void *caller)
2621c2968612SBenjamin Herrenschmidt {
26227ca3027bSDaniel Axtens 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
26237ca3027bSDaniel Axtens 				  NUMA_NO_NODE, GFP_KERNEL, caller);
2624c2968612SBenjamin Herrenschmidt }
2625c2968612SBenjamin Herrenschmidt 
26261da177e4SLinus Torvalds /**
2627183ff22bSSimon Arlott  * get_vm_area - reserve a contiguous kernel virtual area
26281da177e4SLinus Torvalds  * @size:	 size of the area
26291da177e4SLinus Torvalds  * @flags:	 %VM_IOREMAP for I/O mappings or %VM_ALLOC
26301da177e4SLinus Torvalds  *
26311da177e4SLinus Torvalds  * Search an area of @size in the kernel virtual mapping area,
26321da177e4SLinus Torvalds  * and reserve it for our purposes.
2634a862f68aSMike Rapoport  *
2635a862f68aSMike Rapoport  * Return: the area descriptor on success or %NULL on failure.
26361da177e4SLinus Torvalds  */
26371da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
26381da177e4SLinus Torvalds {
26397ca3027bSDaniel Axtens 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
26407ca3027bSDaniel Axtens 				  VMALLOC_START, VMALLOC_END,
264100ef2d2fSDavid Rientjes 				  NUMA_NO_NODE, GFP_KERNEL,
264200ef2d2fSDavid Rientjes 				  __builtin_return_address(0));
264323016969SChristoph Lameter }
264423016969SChristoph Lameter 
264523016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
26465e6cafc8SMarek Szyprowski 				const void *caller)
264723016969SChristoph Lameter {
26487ca3027bSDaniel Axtens 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
26497ca3027bSDaniel Axtens 				  VMALLOC_START, VMALLOC_END,
265000ef2d2fSDavid Rientjes 				  NUMA_NO_NODE, GFP_KERNEL, caller);
26511da177e4SLinus Torvalds }
26521da177e4SLinus Torvalds 
2653e9da6e99SMarek Szyprowski /**
2654e9da6e99SMarek Szyprowski  * find_vm_area - find a contiguous kernel virtual area
2655e9da6e99SMarek Szyprowski  * @addr:	  base address
2656e9da6e99SMarek Szyprowski  *
2657e9da6e99SMarek Szyprowski  * Search for the kernel VM area starting at @addr, and return it.
2658e9da6e99SMarek Szyprowski  * It is up to the caller to do all required locking to keep the returned
2659e9da6e99SMarek Szyprowski  * pointer valid.
2660a862f68aSMike Rapoport  *
266174640617SHui Su  * Return: the area descriptor on success or %NULL on failure.
2662e9da6e99SMarek Szyprowski  */
2663e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr)
266483342314SNick Piggin {
2665db64fe02SNick Piggin 	struct vmap_area *va;
266683342314SNick Piggin 
2667db64fe02SNick Piggin 	va = find_vmap_area((unsigned long)addr);
2668688fcbfcSPengfei Li 	if (!va)
26697856dfebSAndi Kleen 		return NULL;
2670688fcbfcSPengfei Li 
2671688fcbfcSPengfei Li 	return va->vm;
26727856dfebSAndi Kleen }
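/*
 * Query sketch (editor's addition, not part of the original file):
 * find_vm_area() can be used to look up metadata for a vmalloc'ed
 * pointer. Note that vm_struct::size includes the guard page unless
 * the area was created with VM_NO_GUARD; "buf" is assumed to be a
 * live vmalloc address kept valid by the caller.
 */
static unsigned long vmalloc_area_size(const void *buf)
{
	struct vm_struct *area = find_vm_area(buf);

	return area ? area->size : 0;
}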
26737856dfebSAndi Kleen 
26741da177e4SLinus Torvalds /**
2675183ff22bSSimon Arlott  * remove_vm_area - find and remove a contiguous kernel virtual area
26761da177e4SLinus Torvalds  * @addr:	    base address
26771da177e4SLinus Torvalds  *
26781da177e4SLinus Torvalds  * Search for the kernel VM area starting at @addr, and remove it.
26791da177e4SLinus Torvalds  * This function returns the found VM area, but using it is NOT safe
26807856dfebSAndi Kleen  * on SMP machines, except for its size or flags.
2681a862f68aSMike Rapoport  *
268274640617SHui Su  * Return: the area descriptor on success or %NULL on failure.
26831da177e4SLinus Torvalds  */
2684b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr)
26851da177e4SLinus Torvalds {
2686db64fe02SNick Piggin 	struct vmap_area *va;
268775c59ce7SChristoph Hellwig 	struct vm_struct *vm;
2688db64fe02SNick Piggin 
26895803ed29SChristoph Hellwig 	might_sleep();
26905803ed29SChristoph Hellwig 
269117d3ef43SChristoph Hellwig 	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
269217d3ef43SChristoph Hellwig 			addr))
2693db64fe02SNick Piggin 		return NULL;
269417d3ef43SChristoph Hellwig 
269575c59ce7SChristoph Hellwig 	va = find_unlink_vmap_area((unsigned long)addr);
269675c59ce7SChristoph Hellwig 	if (!va || !va->vm)
269775c59ce7SChristoph Hellwig 		return NULL;
269875c59ce7SChristoph Hellwig 	vm = va->vm;
269917d3ef43SChristoph Hellwig 
270017d3ef43SChristoph Hellwig 	debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
270117d3ef43SChristoph Hellwig 	debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
270275c59ce7SChristoph Hellwig 	kasan_free_module_shadow(vm);
270317d3ef43SChristoph Hellwig 	kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
270417d3ef43SChristoph Hellwig 
270575c59ce7SChristoph Hellwig 	free_unmap_vmap_area(va);
270675c59ce7SChristoph Hellwig 	return vm;
27071da177e4SLinus Torvalds }
27081da177e4SLinus Torvalds 
2709868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area,
2710868b104dSRick Edgecombe 				       int (*set_direct_map)(struct page *page))
2711868b104dSRick Edgecombe {
2712868b104dSRick Edgecombe 	int i;
2713868b104dSRick Edgecombe 
2714121e6f32SNicholas Piggin 	/* HUGE_VMALLOC passes small pages to set_direct_map */
2715868b104dSRick Edgecombe 	for (i = 0; i < area->nr_pages; i++)
2716868b104dSRick Edgecombe 		if (page_address(area->pages[i]))
2717868b104dSRick Edgecombe 			set_direct_map(area->pages[i]);
2718868b104dSRick Edgecombe }
2719868b104dSRick Edgecombe 
27209e5fa0aeSChristoph Hellwig /*
27219e5fa0aeSChristoph Hellwig  * Flush the vm mapping and reset the direct map.
27229e5fa0aeSChristoph Hellwig  */
27239e5fa0aeSChristoph Hellwig static void vm_reset_perms(struct vm_struct *area)
2724868b104dSRick Edgecombe {
2725868b104dSRick Edgecombe 	unsigned long start = ULONG_MAX, end = 0;
2726121e6f32SNicholas Piggin 	unsigned int page_order = vm_area_page_order(area);
272731e67340SRick Edgecombe 	int flush_dmap = 0;
2728868b104dSRick Edgecombe 	int i;
2729868b104dSRick Edgecombe 
2730868b104dSRick Edgecombe 	/*
27319e5fa0aeSChristoph Hellwig 	 * Find the start and end range of the direct mappings to make sure that
2732868b104dSRick Edgecombe 	 * the vm_unmap_aliases() flush includes the direct map.
2733868b104dSRick Edgecombe 	 */
2734121e6f32SNicholas Piggin 	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
27358e41f872SRick Edgecombe 		unsigned long addr = (unsigned long)page_address(area->pages[i]);
27369e5fa0aeSChristoph Hellwig 
27378e41f872SRick Edgecombe 		if (addr) {
2738121e6f32SNicholas Piggin 			unsigned long page_size;
2739121e6f32SNicholas Piggin 
2740121e6f32SNicholas Piggin 			page_size = PAGE_SIZE << page_order;
2741868b104dSRick Edgecombe 			start = min(addr, start);
2742121e6f32SNicholas Piggin 			end = max(addr + page_size, end);
274331e67340SRick Edgecombe 			flush_dmap = 1;
2744868b104dSRick Edgecombe 		}
2745868b104dSRick Edgecombe 	}
2746868b104dSRick Edgecombe 
2747868b104dSRick Edgecombe 	/*
2748868b104dSRick Edgecombe 	 * Set direct map to something invalid so that it won't be cached if
2749868b104dSRick Edgecombe 	 * there are any accesses after the TLB flush, then flush the TLB and
2750868b104dSRick Edgecombe 	 * reset the direct map permissions to the default.
2751868b104dSRick Edgecombe 	 */
2752868b104dSRick Edgecombe 	set_area_direct_map(area, set_direct_map_invalid_noflush);
275331e67340SRick Edgecombe 	_vm_unmap_aliases(start, end, flush_dmap);
2754868b104dSRick Edgecombe 	set_area_direct_map(area, set_direct_map_default_noflush);
2755868b104dSRick Edgecombe }
2756868b104dSRick Edgecombe 
2757208162f4SChristoph Hellwig static void delayed_vfree_work(struct work_struct *w)
27581da177e4SLinus Torvalds {
2759208162f4SChristoph Hellwig 	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
2760208162f4SChristoph Hellwig 	struct llist_node *t, *llnode;
27611da177e4SLinus Torvalds 
2762208162f4SChristoph Hellwig 	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
27635d3d31d6SChristoph Hellwig 		vfree(llnode);
2764bf22e37aSAndrey Ryabinin }
2765bf22e37aSAndrey Ryabinin 
2766bf22e37aSAndrey Ryabinin /**
2767bf22e37aSAndrey Ryabinin  * vfree_atomic - release memory allocated by vmalloc()
2768bf22e37aSAndrey Ryabinin  * @addr:	  memory base address
2769bf22e37aSAndrey Ryabinin  *
2770bf22e37aSAndrey Ryabinin  * This one is just like vfree() but can be called in any atomic context
2771bf22e37aSAndrey Ryabinin  * except NMIs.
2772bf22e37aSAndrey Ryabinin  */
2773bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr)
2774bf22e37aSAndrey Ryabinin {
277501e2e839SChristoph Hellwig 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2776bf22e37aSAndrey Ryabinin 
277701e2e839SChristoph Hellwig 	BUG_ON(in_nmi());
2778bf22e37aSAndrey Ryabinin 	kmemleak_free(addr);
2779bf22e37aSAndrey Ryabinin 
278001e2e839SChristoph Hellwig 	/*
278101e2e839SChristoph Hellwig 	 * Use raw_cpu_ptr() because this can be called from preemptible
278201e2e839SChristoph Hellwig 	 * context. Preemption is absolutely fine here, because the llist_add()
278301e2e839SChristoph Hellwig 	 * implementation is lockless, so it works even if we are adding to
278401e2e839SChristoph Hellwig 	 * another cpu's list. schedule_work() should be fine with this too.
278501e2e839SChristoph Hellwig 	 */
278601e2e839SChristoph Hellwig 	if (addr && llist_add((struct llist_node *)addr, &p->list))
278701e2e839SChristoph Hellwig 		schedule_work(&p->wq);
2788c67dc624SRoman Penyaev }
2789c67dc624SRoman Penyaev 
27901da177e4SLinus Torvalds /**
2791fa307474SMatthew Wilcox (Oracle)  * vfree - Release memory allocated by vmalloc()
2792fa307474SMatthew Wilcox (Oracle)  * @addr:  Memory base address
27931da177e4SLinus Torvalds  *
2794fa307474SMatthew Wilcox (Oracle)  * Free the virtually contiguous memory area starting at @addr, as obtained
2795fa307474SMatthew Wilcox (Oracle)  * from one of the vmalloc() family of APIs.  This will usually also free the
2796fa307474SMatthew Wilcox (Oracle)  * physical memory underlying the virtual allocation, but that memory is
2797fa307474SMatthew Wilcox (Oracle)  * reference counted, so it will not be freed until the last user goes away.
27981da177e4SLinus Torvalds  *
2799fa307474SMatthew Wilcox (Oracle)  * If @addr is NULL, no operation is performed.
280032fcfd40SAl Viro  *
2801fa307474SMatthew Wilcox (Oracle)  * Context:
28023ca4ea3aSAndrey Ryabinin  * May sleep if called *not* from interrupt context.
2803fa307474SMatthew Wilcox (Oracle)  * Must not be called in NMI context (strictly speaking, it could be
2804fa307474SMatthew Wilcox (Oracle)  * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2805f0953a1bSIngo Molnar  * conventions for vfree() arch-dependent would be a really bad idea).
28061da177e4SLinus Torvalds  */
2807b3bdda02SChristoph Lameter void vfree(const void *addr)
28081da177e4SLinus Torvalds {
280979311c1fSChristoph Hellwig 	struct vm_struct *vm;
281079311c1fSChristoph Hellwig 	int i;
281179311c1fSChristoph Hellwig 
281201e2e839SChristoph Hellwig 	if (unlikely(in_interrupt())) {
281301e2e839SChristoph Hellwig 		vfree_atomic(addr);
281432fcfd40SAl Viro 		return;
281501e2e839SChristoph Hellwig 	}
281601e2e839SChristoph Hellwig 
28171da177e4SLinus Torvalds 	BUG_ON(in_nmi());
281889219d37SCatalin Marinas 	kmemleak_free(addr);
281901e2e839SChristoph Hellwig 	might_sleep();
282032fcfd40SAl Viro 
2821bf22e37aSAndrey Ryabinin 	if (!addr)
2822bf22e37aSAndrey Ryabinin 		return;
2823c67dc624SRoman Penyaev 
282479311c1fSChristoph Hellwig 	vm = remove_vm_area(addr);
282579311c1fSChristoph Hellwig 	if (unlikely(!vm)) {
282679311c1fSChristoph Hellwig 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
282779311c1fSChristoph Hellwig 				addr);
282879311c1fSChristoph Hellwig 		return;
282979311c1fSChristoph Hellwig 	}
283079311c1fSChristoph Hellwig 
28319e5fa0aeSChristoph Hellwig 	if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
28329e5fa0aeSChristoph Hellwig 		vm_reset_perms(vm);
283379311c1fSChristoph Hellwig 	for (i = 0; i < vm->nr_pages; i++) {
283479311c1fSChristoph Hellwig 		struct page *page = vm->pages[i];
283579311c1fSChristoph Hellwig 
283679311c1fSChristoph Hellwig 		BUG_ON(!page);
283779311c1fSChristoph Hellwig 		mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
283879311c1fSChristoph Hellwig 		/*
283979311c1fSChristoph Hellwig 		 * High-order allocs for huge vmallocs are split, so they
284079311c1fSChristoph Hellwig 		 * can be freed as an array of order-0 allocations.
284179311c1fSChristoph Hellwig 		 */
2842dcc1be11SLorenzo Stoakes 		__free_page(page);
284379311c1fSChristoph Hellwig 		cond_resched();
284479311c1fSChristoph Hellwig 	}
284579311c1fSChristoph Hellwig 	atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
284679311c1fSChristoph Hellwig 	kvfree(vm->pages);
284779311c1fSChristoph Hellwig 	kfree(vm);
28481da177e4SLinus Torvalds }
28491da177e4SLinus Torvalds EXPORT_SYMBOL(vfree);
28501da177e4SLinus Torvalds 
28511da177e4SLinus Torvalds /**
28521da177e4SLinus Torvalds  * vunmap - release virtual mapping obtained by vmap()
28531da177e4SLinus Torvalds  * @addr:   memory base address
28541da177e4SLinus Torvalds  *
28551da177e4SLinus Torvalds  * Free the virtually contiguous memory area starting at @addr,
28561da177e4SLinus Torvalds  * which was created from the page array passed to vmap().
28571da177e4SLinus Torvalds  *
285880e93effSPekka Enberg  * Must not be called in interrupt context.
28591da177e4SLinus Torvalds  */
2860b3bdda02SChristoph Lameter void vunmap(const void *addr)
28611da177e4SLinus Torvalds {
286279311c1fSChristoph Hellwig 	struct vm_struct *vm;
286379311c1fSChristoph Hellwig 
28641da177e4SLinus Torvalds 	BUG_ON(in_interrupt());
286534754b69SPeter Zijlstra 	might_sleep();
286679311c1fSChristoph Hellwig 
286779311c1fSChristoph Hellwig 	if (!addr)
286879311c1fSChristoph Hellwig 		return;
286979311c1fSChristoph Hellwig 	vm = remove_vm_area(addr);
287079311c1fSChristoph Hellwig 	if (unlikely(!vm)) {
287179311c1fSChristoph Hellwig 		WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
287279311c1fSChristoph Hellwig 				addr);
287379311c1fSChristoph Hellwig 		return;
287479311c1fSChristoph Hellwig 	}
287579311c1fSChristoph Hellwig 	kfree(vm);
28761da177e4SLinus Torvalds }
28771da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap);
28781da177e4SLinus Torvalds 
28791da177e4SLinus Torvalds /**
28801da177e4SLinus Torvalds  * vmap - map an array of pages into virtually contiguous space
28811da177e4SLinus Torvalds  * @pages: array of page pointers
28821da177e4SLinus Torvalds  * @count: number of pages to map
28831da177e4SLinus Torvalds  * @flags: vm_area->flags
28841da177e4SLinus Torvalds  * @prot: page protection for the mapping
28851da177e4SLinus Torvalds  *
2886b944afc9SChristoph Hellwig  * Maps @count pages from @pages into contiguous kernel virtual space.
2887b944afc9SChristoph Hellwig  * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2888b944afc9SChristoph Hellwig  * (which must be kmalloc or vmalloc memory) and one reference per page in it
2889b944afc9SChristoph Hellwig  * are transferred from the caller to vmap(), and will be freed / dropped when
2890b944afc9SChristoph Hellwig  * vfree() is called on the return value.
2891a862f68aSMike Rapoport  *
2892a862f68aSMike Rapoport  * Return: the address of the area or %NULL on failure
28931da177e4SLinus Torvalds  */
28941da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count,
28951da177e4SLinus Torvalds 	   unsigned long flags, pgprot_t prot)
28961da177e4SLinus Torvalds {
28971da177e4SLinus Torvalds 	struct vm_struct *area;
2898b67177ecSNicholas Piggin 	unsigned long addr;
289965ee03c4SGuillermo Julián Moreno 	unsigned long size;		/* In bytes */
29001da177e4SLinus Torvalds 
290134754b69SPeter Zijlstra 	might_sleep();
290234754b69SPeter Zijlstra 
290337f3605eSChristoph Hellwig 	if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
290437f3605eSChristoph Hellwig 		return NULL;
290537f3605eSChristoph Hellwig 
2906bd1a8fb2SPeter Zijlstra 	/*
2907bd1a8fb2SPeter Zijlstra 	 * Your top guard is someone else's bottom guard. Not having a top
2908bd1a8fb2SPeter Zijlstra 	 * guard compromises someone else's mappings too.
2909bd1a8fb2SPeter Zijlstra 	 */
2910bd1a8fb2SPeter Zijlstra 	if (WARN_ON_ONCE(flags & VM_NO_GUARD))
2911bd1a8fb2SPeter Zijlstra 		flags &= ~VM_NO_GUARD;
2912bd1a8fb2SPeter Zijlstra 
2913ca79b0c2SArun KS 	if (count > totalram_pages())
29141da177e4SLinus Torvalds 		return NULL;
29151da177e4SLinus Torvalds 
291665ee03c4SGuillermo Julián Moreno 	size = (unsigned long)count << PAGE_SHIFT;
291765ee03c4SGuillermo Julián Moreno 	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
29181da177e4SLinus Torvalds 	if (!area)
29191da177e4SLinus Torvalds 		return NULL;
292023016969SChristoph Lameter 
2921b67177ecSNicholas Piggin 	addr = (unsigned long)area->addr;
2922b67177ecSNicholas Piggin 	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2923b67177ecSNicholas Piggin 				pages, PAGE_SHIFT) < 0) {
29241da177e4SLinus Torvalds 		vunmap(area->addr);
29251da177e4SLinus Torvalds 		return NULL;
29261da177e4SLinus Torvalds 	}
29271da177e4SLinus Torvalds 
2928c22ee528SMiaohe Lin 	if (flags & VM_MAP_PUT_PAGES) {
2929b944afc9SChristoph Hellwig 		area->pages = pages;
2930c22ee528SMiaohe Lin 		area->nr_pages = count;
2931c22ee528SMiaohe Lin 	}
29321da177e4SLinus Torvalds 	return area->addr;
29331da177e4SLinus Torvalds }
29341da177e4SLinus Torvalds EXPORT_SYMBOL(vmap);
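/*
 * Usage sketch (editor's addition, not part of the original file):
 * mapping a caller-owned page array into contiguous kernel virtual
 * space with vmap() and tearing the mapping down with vunmap().
 * Since VM_MAP_PUT_PAGES is not set, page ownership stays with the
 * caller.
 */
static int zero_pages_via_vmap(struct page **pages, unsigned int count)
{
	void *addr = vmap(pages, count, VM_MAP, PAGE_KERNEL);

	if (!addr)
		return -ENOMEM;

	memset(addr, 0, (size_t)count << PAGE_SHIFT);

	/* Unmap only; the pages themselves remain allocated. */
	vunmap(addr);
	return 0;
}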
29351da177e4SLinus Torvalds 
29363e9a9e25SChristoph Hellwig #ifdef CONFIG_VMAP_PFN
29373e9a9e25SChristoph Hellwig struct vmap_pfn_data {
29383e9a9e25SChristoph Hellwig 	unsigned long	*pfns;
29393e9a9e25SChristoph Hellwig 	pgprot_t	prot;
29403e9a9e25SChristoph Hellwig 	unsigned int	idx;
29413e9a9e25SChristoph Hellwig };
29423e9a9e25SChristoph Hellwig 
29433e9a9e25SChristoph Hellwig static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
29443e9a9e25SChristoph Hellwig {
29453e9a9e25SChristoph Hellwig 	struct vmap_pfn_data *data = private;
2946b3f78e74SRyan Roberts 	unsigned long pfn = data->pfns[data->idx];
2947b3f78e74SRyan Roberts 	pte_t ptent;
29483e9a9e25SChristoph Hellwig 
2949b3f78e74SRyan Roberts 	if (WARN_ON_ONCE(pfn_valid(pfn)))
29503e9a9e25SChristoph Hellwig 		return -EINVAL;
2951b3f78e74SRyan Roberts 
2952b3f78e74SRyan Roberts 	ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
2953b3f78e74SRyan Roberts 	set_pte_at(&init_mm, addr, pte, ptent);
2954b3f78e74SRyan Roberts 
2955b3f78e74SRyan Roberts 	data->idx++;
29563e9a9e25SChristoph Hellwig 	return 0;
29573e9a9e25SChristoph Hellwig }
29583e9a9e25SChristoph Hellwig 
29593e9a9e25SChristoph Hellwig /**
29603e9a9e25SChristoph Hellwig  * vmap_pfn - map an array of PFNs into virtually contiguous space
29613e9a9e25SChristoph Hellwig  * @pfns: array of PFNs
29623e9a9e25SChristoph Hellwig  * @count: number of pages to map
29633e9a9e25SChristoph Hellwig  * @prot: page protection for the mapping
29643e9a9e25SChristoph Hellwig  *
29653e9a9e25SChristoph Hellwig  * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
29663e9a9e25SChristoph Hellwig  * the start address of the mapping.
29673e9a9e25SChristoph Hellwig  */
29683e9a9e25SChristoph Hellwig void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
29693e9a9e25SChristoph Hellwig {
29703e9a9e25SChristoph Hellwig 	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
29713e9a9e25SChristoph Hellwig 	struct vm_struct *area;
29723e9a9e25SChristoph Hellwig 
29733e9a9e25SChristoph Hellwig 	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
29743e9a9e25SChristoph Hellwig 			__builtin_return_address(0));
29753e9a9e25SChristoph Hellwig 	if (!area)
29763e9a9e25SChristoph Hellwig 		return NULL;
29773e9a9e25SChristoph Hellwig 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
29783e9a9e25SChristoph Hellwig 			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
29793e9a9e25SChristoph Hellwig 		free_vm_area(area);
29803e9a9e25SChristoph Hellwig 		return NULL;
29813e9a9e25SChristoph Hellwig 	}
2982a50420c7SAlexandre Ghiti 
2983a50420c7SAlexandre Ghiti 	flush_cache_vmap((unsigned long)area->addr,
2984a50420c7SAlexandre Ghiti 			 (unsigned long)area->addr + count * PAGE_SIZE);
2985a50420c7SAlexandre Ghiti 
29863e9a9e25SChristoph Hellwig 	return area->addr;
29873e9a9e25SChristoph Hellwig }
29883e9a9e25SChristoph Hellwig EXPORT_SYMBOL_GPL(vmap_pfn);
29893e9a9e25SChristoph Hellwig #endif /* CONFIG_VMAP_PFN */
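#ifdef CONFIG_VMAP_PFN
/*
 * Usage sketch (editor's addition, not part of the original file):
 * vmap_pfn() is for PFN ranges with no struct page behind them
 * (pfn_valid() must be false), such as device BAR space. "bar_pfn",
 * "nr" and "map_device_pfns" are hypothetical, and the pgprot choice
 * is device specific; the mapping is released with vunmap().
 */
static void __iomem *map_device_pfns(unsigned long bar_pfn, unsigned int nr)
{
	unsigned long pfns[8];
	unsigned int i;

	if (nr == 0 || nr > ARRAY_SIZE(pfns))
		return NULL;

	for (i = 0; i < nr; i++)
		pfns[i] = bar_pfn + i;

	/* Uncached mapping, as is typical for MMIO. */
	return (void __iomem *)vmap_pfn(pfns, nr, pgprot_noncached(PAGE_KERNEL));
}
#endif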
29903e9a9e25SChristoph Hellwig 
299112b9f873SUladzislau Rezki static inline unsigned int
299212b9f873SUladzislau Rezki vm_area_alloc_pages(gfp_t gfp, int nid,
2993343ab817SUladzislau Rezki (Sony) 		unsigned int order, unsigned int nr_pages, struct page **pages)
299412b9f873SUladzislau Rezki {
299512b9f873SUladzislau Rezki 	unsigned int nr_allocated = 0;
2996e9c3cda4SMichal Hocko 	gfp_t alloc_gfp = gfp;
2997e9c3cda4SMichal Hocko 	bool nofail = false;
2998ffb29b1cSChen Wandun 	struct page *page;
2999ffb29b1cSChen Wandun 	int i;
300012b9f873SUladzislau Rezki 
300112b9f873SUladzislau Rezki 	/*
300212b9f873SUladzislau Rezki 	 * For order-0 pages we make use of the bulk allocator. If
300312b9f873SUladzislau Rezki 	 * the page array ends up partly populated or not populated
300412b9f873SUladzislau Rezki 	 * at all due to failures, fall back to the single-page
300512b9f873SUladzislau Rezki 	 * allocator, which is more permissive.
300612b9f873SUladzislau Rezki 	 */
3007c00b6b96SChen Wandun 	if (!order) {
3008e9c3cda4SMichal Hocko 		/* The bulk allocator doesn't officially support nofail requests */
30099376130cSMichal Hocko 		gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
30109376130cSMichal Hocko 
3011343ab817SUladzislau Rezki (Sony) 		while (nr_allocated < nr_pages) {
3012343ab817SUladzislau Rezki (Sony) 			unsigned int nr, nr_pages_request;
3013343ab817SUladzislau Rezki (Sony) 
3014343ab817SUladzislau Rezki (Sony) 			/*
3015343ab817SUladzislau Rezki (Sony) 			 * The maximum allowed request is hard-coded to 100
3016343ab817SUladzislau Rezki (Sony) 			 * pages per call, in order to prevent a long
3017343ab817SUladzislau Rezki (Sony) 			 * preemption-off scenario in the bulk allocator;
3018343ab817SUladzislau Rezki (Sony) 			 * hence the range is [1:100].
3019343ab817SUladzislau Rezki (Sony) 			 */
3020343ab817SUladzislau Rezki (Sony) 			nr_pages_request = min(100U, nr_pages - nr_allocated);
3021343ab817SUladzislau Rezki (Sony) 
3022c00b6b96SChen Wandun 			/* Memory allocation should consider mempolicy; we must
3023c00b6b96SChen Wandun 			 * not blindly use the nearest node when nid == NUMA_NO_NODE,
3024c00b6b96SChen Wandun 			 * otherwise memory may be allocated from only one node
302598af39d5SYixuan Cao 			 * even though mempolicy asks for interleaved allocations.
3026c00b6b96SChen Wandun 			 */
3027c00b6b96SChen Wandun 			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
30289376130cSMichal Hocko 				nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
3029c00b6b96SChen Wandun 							nr_pages_request,
3030c00b6b96SChen Wandun 							pages + nr_allocated);
3031c00b6b96SChen Wandun 
3032c00b6b96SChen Wandun 			else
30339376130cSMichal Hocko 				nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
3034c00b6b96SChen Wandun 							nr_pages_request,
3035c00b6b96SChen Wandun 							pages + nr_allocated);
3036343ab817SUladzislau Rezki (Sony) 
3037343ab817SUladzislau Rezki (Sony) 			nr_allocated += nr;
3038343ab817SUladzislau Rezki (Sony) 			cond_resched();
3039343ab817SUladzislau Rezki (Sony) 
3040343ab817SUladzislau Rezki (Sony) 			/*
3041343ab817SUladzislau Rezki (Sony) 			 * If no pages or only some were obtained,
3042343ab817SUladzislau Rezki (Sony) 			 * fall back to the single page allocator.
3043343ab817SUladzislau Rezki (Sony) 			 */
3044343ab817SUladzislau Rezki (Sony) 			if (nr != nr_pages_request)
3045343ab817SUladzislau Rezki (Sony) 				break;
3046343ab817SUladzislau Rezki (Sony) 		}
3047e9c3cda4SMichal Hocko 	} else if (gfp & __GFP_NOFAIL) {
3048e9c3cda4SMichal Hocko 		/*
3049e9c3cda4SMichal Hocko 		 * Higher order nofail allocations are really expensive and
3050e9c3cda4SMichal Hocko 		 * potentially dangerous (premature OOM, disruptive reclaim,
3051e9c3cda4SMichal Hocko 		 * compaction, etc.).
3052e9c3cda4SMichal Hocko 		 */
3053e9c3cda4SMichal Hocko 		alloc_gfp &= ~__GFP_NOFAIL;
3054e9c3cda4SMichal Hocko 		nofail = true;
30553b8000aeSNicholas Piggin 	}
305612b9f873SUladzislau Rezki 
305712b9f873SUladzislau Rezki 	/* High-order pages or fallback path if "bulk" fails. */
3058ffb29b1cSChen Wandun 	while (nr_allocated < nr_pages) {
3059dd544141SVasily Averin 		if (fatal_signal_pending(current))
3060dd544141SVasily Averin 			break;
3061dd544141SVasily Averin 
3062ffb29b1cSChen Wandun 		if (nid == NUMA_NO_NODE)
3063e9c3cda4SMichal Hocko 			page = alloc_pages(alloc_gfp, order);
3064ffb29b1cSChen Wandun 		else
3065e9c3cda4SMichal Hocko 			page = alloc_pages_node(nid, alloc_gfp, order);
3066e9c3cda4SMichal Hocko 		if (unlikely(!page)) {
3067e9c3cda4SMichal Hocko 			if (!nofail)
306812b9f873SUladzislau Rezki 				break;
3069e9c3cda4SMichal Hocko 
3070e9c3cda4SMichal Hocko 			/* fall back to zero-order allocations */
3071e9c3cda4SMichal Hocko 			alloc_gfp |= __GFP_NOFAIL;
3072e9c3cda4SMichal Hocko 			order = 0;
3073e9c3cda4SMichal Hocko 			continue;
3074e9c3cda4SMichal Hocko 		}
3075e9c3cda4SMichal Hocko 
30763b8000aeSNicholas Piggin 		/*
30773b8000aeSNicholas Piggin 		 * Higher order allocations must be able to be treated as
30783b8000aeSNicholas Piggin 		 * independent small pages by callers (as they can with
30793b8000aeSNicholas Piggin 		 * small-page vmallocs). Some drivers do their own refcounting
30803b8000aeSNicholas Piggin 		 * on vmalloc_to_page() pages, some use page->mapping,
30813b8000aeSNicholas Piggin 		 * page->lru, etc.
30823b8000aeSNicholas Piggin 		 */
30833b8000aeSNicholas Piggin 		if (order)
30843b8000aeSNicholas Piggin 			split_page(page, order);
308512b9f873SUladzislau Rezki 
308612b9f873SUladzislau Rezki 		/*
308712b9f873SUladzislau Rezki 		 * Careful, we allocate and map page-order pages, but
308812b9f873SUladzislau Rezki 		 * tracking is done per PAGE_SIZE page so as to keep the
308912b9f873SUladzislau Rezki 		 * vm_struct APIs independent of the physical/mapped size.
309012b9f873SUladzislau Rezki 		 */
309112b9f873SUladzislau Rezki 		for (i = 0; i < (1U << order); i++)
309212b9f873SUladzislau Rezki 			pages[nr_allocated + i] = page + i;
309312b9f873SUladzislau Rezki 
309412b9f873SUladzislau Rezki 		cond_resched();
309512b9f873SUladzislau Rezki 		nr_allocated += 1U << order;
309612b9f873SUladzislau Rezki 	}
309712b9f873SUladzislau Rezki 
309812b9f873SUladzislau Rezki 	return nr_allocated;
309912b9f873SUladzislau Rezki }
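/*
 * Editor's sketch (not part of the original file) of the split_page()
 * bookkeeping used above: one order-2 block becomes four independent
 * order-0 pages, so each array entry can be refcounted and freed on its
 * own, just like a small-page vmalloc.
 */
static __maybe_unused int example_split_bookkeeping(struct page **pages)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);
	int i;

	if (!page)
		return -ENOMEM;

	split_page(page, 2);		/* four independent struct pages now */
	for (i = 0; i < (1U << 2); i++)
		pages[i] = page + i;	/* track per PAGE_SIZE page */

	return 0;
}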
310012b9f873SUladzislau Rezki 
3101e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3102121e6f32SNicholas Piggin 				 pgprot_t prot, unsigned int page_shift,
3103121e6f32SNicholas Piggin 				 int node)
31041da177e4SLinus Torvalds {
3105930f036bSDavid Rientjes 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
31069376130cSMichal Hocko 	bool nofail = gfp_mask & __GFP_NOFAIL;
3107121e6f32SNicholas Piggin 	unsigned long addr = (unsigned long)area->addr;
3108121e6f32SNicholas Piggin 	unsigned long size = get_vm_area_size(area);
310934fe6537SAndrew Morton 	unsigned long array_size;
3110121e6f32SNicholas Piggin 	unsigned int nr_small_pages = size >> PAGE_SHIFT;
3111121e6f32SNicholas Piggin 	unsigned int page_order;
3112451769ebSMichal Hocko 	unsigned int flags;
3113451769ebSMichal Hocko 	int ret;
31141da177e4SLinus Torvalds 
3115121e6f32SNicholas Piggin 	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
311680b1d8fdSLorenzo Stoakes 
3117f255935bSChristoph Hellwig 	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3118f255935bSChristoph Hellwig 		gfp_mask |= __GFP_HIGHMEM;
31191da177e4SLinus Torvalds 
31201da177e4SLinus Torvalds 	/* Please note that the recursion is strictly bounded. */
31218757d5faSJan Kiszka 	if (array_size > PAGE_SIZE) {
31225c1f4e69SUladzislau Rezki (Sony) 		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
3123f255935bSChristoph Hellwig 					area->caller);
3124286e1ea3SAndrew Morton 	} else {
31255c1f4e69SUladzislau Rezki (Sony) 		area->pages = kmalloc_node(array_size, nested_gfp, node);
3126286e1ea3SAndrew Morton 	}
31277ea36242SAustin Kim 
31285c1f4e69SUladzislau Rezki (Sony) 	if (!area->pages) {
3129c3d77172SUladzislau Rezki (Sony) 		warn_alloc(gfp_mask, NULL,
3130f4bdfeafSUladzislau Rezki (Sony) 			"vmalloc error: size %lu, failed to allocate page array size %lu",
3131d70bec8cSNicholas Piggin 			nr_small_pages * PAGE_SIZE, array_size);
3132cd61413bSUladzislau Rezki (Sony) 		free_vm_area(area);
31331da177e4SLinus Torvalds 		return NULL;
31341da177e4SLinus Torvalds 	}
31351da177e4SLinus Torvalds 
3136121e6f32SNicholas Piggin 	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3137121e6f32SNicholas Piggin 	page_order = vm_area_page_order(area);
3138121e6f32SNicholas Piggin 
3139c3d77172SUladzislau Rezki (Sony) 	area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
3140c3d77172SUladzislau Rezki (Sony) 		node, page_order, nr_small_pages, area->pages);
31415c1f4e69SUladzislau Rezki (Sony) 
314297105f0aSRoman Gushchin 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
31434e5aa1f4SShakeel Butt 	if (gfp_mask & __GFP_ACCOUNT) {
31443b8000aeSNicholas Piggin 		int i;
31454e5aa1f4SShakeel Butt 
31463b8000aeSNicholas Piggin 		for (i = 0; i < area->nr_pages; i++)
31473b8000aeSNicholas Piggin 			mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
31484e5aa1f4SShakeel Butt 	}
31495c1f4e69SUladzislau Rezki (Sony) 
31505c1f4e69SUladzislau Rezki (Sony) 	/*
31515c1f4e69SUladzislau Rezki (Sony) 	 * If not enough pages were obtained to accomplish the
3152f41f036bSChristoph Hellwig 	 * allocation request, free any pages obtained via vfree().
31535c1f4e69SUladzislau Rezki (Sony) 	 */
31545c1f4e69SUladzislau Rezki (Sony) 	if (area->nr_pages != nr_small_pages) {
315595a301eeSLorenzo Stoakes 		/*
315695a301eeSLorenzo Stoakes 		 * vm_area_alloc_pages() can fail due to insufficient memory but
315795a301eeSLorenzo Stoakes 		 * also:
315895a301eeSLorenzo Stoakes 		 *
315995a301eeSLorenzo Stoakes 		 * - a pending fatal signal
316095a301eeSLorenzo Stoakes 		 * - insufficient huge page-order pages
316195a301eeSLorenzo Stoakes 		 *
316295a301eeSLorenzo Stoakes 		 * Since we always retry allocations at order-0 in the huge page
316395a301eeSLorenzo Stoakes 		 * case, a warning for either is spurious.
316495a301eeSLorenzo Stoakes 		 */
316595a301eeSLorenzo Stoakes 		if (!fatal_signal_pending(current) && page_order == 0)
3166c3d77172SUladzislau Rezki (Sony) 			warn_alloc(gfp_mask, NULL,
316795a301eeSLorenzo Stoakes 				"vmalloc error: size %lu, failed to allocate pages",
316895a301eeSLorenzo Stoakes 				area->nr_pages * PAGE_SIZE);
31691da177e4SLinus Torvalds 		goto fail;
31701da177e4SLinus Torvalds 	}
3171121e6f32SNicholas Piggin 
3172451769ebSMichal Hocko 	/*
3173451769ebSMichal Hocko 	 * Page table allocations ignore the external gfp mask; enforce it
3174451769ebSMichal Hocko 	 * via the scope API.
3175451769ebSMichal Hocko 	 */
3176451769ebSMichal Hocko 	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3177451769ebSMichal Hocko 		flags = memalloc_nofs_save();
3178451769ebSMichal Hocko 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3179451769ebSMichal Hocko 		flags = memalloc_noio_save();
3180451769ebSMichal Hocko 
31819376130cSMichal Hocko 	do {
3182451769ebSMichal Hocko 		ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3183451769ebSMichal Hocko 			page_shift);
31849376130cSMichal Hocko 		if (nofail && (ret < 0))
31859376130cSMichal Hocko 			schedule_timeout_uninterruptible(1);
31869376130cSMichal Hocko 	} while (nofail && (ret < 0));
3187451769ebSMichal Hocko 
3188451769ebSMichal Hocko 	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3189451769ebSMichal Hocko 		memalloc_nofs_restore(flags);
3190451769ebSMichal Hocko 	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3191451769ebSMichal Hocko 		memalloc_noio_restore(flags);
3192451769ebSMichal Hocko 
3193451769ebSMichal Hocko 	if (ret < 0) {
3194c3d77172SUladzislau Rezki (Sony) 		warn_alloc(gfp_mask, NULL,
3195f4bdfeafSUladzislau Rezki (Sony) 			"vmalloc error: size %lu, failed to map pages",
3196d70bec8cSNicholas Piggin 			area->nr_pages * PAGE_SIZE);
31971da177e4SLinus Torvalds 		goto fail;
3198d70bec8cSNicholas Piggin 	}
3199ed1f324cSChristoph Hellwig 
32001da177e4SLinus Torvalds 	return area->addr;
32011da177e4SLinus Torvalds 
32021da177e4SLinus Torvalds fail:
3203f41f036bSChristoph Hellwig 	vfree(area->addr);
32041da177e4SLinus Torvalds 	return NULL;
32051da177e4SLinus Torvalds }
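/*
 * Editor's sketch (not part of the original file) of the scope API
 * pattern used above, in isolation: page table allocations ignore the
 * caller's gfp mask, so GFP_NOFS-like constraints must be enforced with
 * memalloc_nofs_save()/memalloc_nofs_restore() around the mapping step.
 */
static __maybe_unused void example_nofs_scope(void)
{
	unsigned int flags = memalloc_nofs_save();

	/* any allocation in this scope implicitly drops __GFP_FS */

	memalloc_nofs_restore(flags);
}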
32061da177e4SLinus Torvalds 
3207d0a21265SDavid Rientjes /**
3208d0a21265SDavid Rientjes  * __vmalloc_node_range - allocate virtually contiguous memory
3209d0a21265SDavid Rientjes  * @size:		  allocation size
3210d0a21265SDavid Rientjes  * @align:		  desired alignment
3211d0a21265SDavid Rientjes  * @start:		  vm area range start
3212d0a21265SDavid Rientjes  * @end:		  vm area range end
3213d0a21265SDavid Rientjes  * @gfp_mask:		  flags for the page level allocator
3214d0a21265SDavid Rientjes  * @prot:		  protection mask for the allocated pages
3215cb9e3c29SAndrey Ryabinin  * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
321600ef2d2fSDavid Rientjes  * @node:		  node to use for allocation or NUMA_NO_NODE
3217d0a21265SDavid Rientjes  * @caller:		  caller's return address
3218d0a21265SDavid Rientjes  *
3219d0a21265SDavid Rientjes  * Allocate enough pages to cover @size from the page level
3220b7d90e7aSMichal Hocko  * allocator with @gfp_mask flags. Please note that the full set of gfp
322130d3f011SMichal Hocko  * flags is not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
322230d3f011SMichal Hocko  * supported.
322330d3f011SMichal Hocko  * Zone modifiers are not supported. From the reclaim modifiers
322430d3f011SMichal Hocko  * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
322530d3f011SMichal Hocko  * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
322630d3f011SMichal Hocko  * __GFP_RETRY_MAYFAIL are not supported).
322730d3f011SMichal Hocko  *
322830d3f011SMichal Hocko  * __GFP_NOWARN can be used to suppress failure messages.
3229b7d90e7aSMichal Hocko  *
3230b7d90e7aSMichal Hocko  * Map them into contiguous kernel virtual space, using a pagetable
3231b7d90e7aSMichal Hocko  * protection of @prot.
3232a862f68aSMike Rapoport  *
3233a862f68aSMike Rapoport  * Return: the address of the area or %NULL on failure
3234d0a21265SDavid Rientjes  */
3235d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align,
3236d0a21265SDavid Rientjes 			unsigned long start, unsigned long end, gfp_t gfp_mask,
3237cb9e3c29SAndrey Ryabinin 			pgprot_t prot, unsigned long vm_flags, int node,
3238cb9e3c29SAndrey Ryabinin 			const void *caller)
3239930fc45aSChristoph Lameter {
3240d0a21265SDavid Rientjes 	struct vm_struct *area;
324119f1c3acSAndrey Konovalov 	void *ret;
3242f6e39794SAndrey Konovalov 	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3243d0a21265SDavid Rientjes 	unsigned long real_size = size;
3244121e6f32SNicholas Piggin 	unsigned long real_align = align;
3245121e6f32SNicholas Piggin 	unsigned int shift = PAGE_SHIFT;
3246d0a21265SDavid Rientjes 
3247d70bec8cSNicholas Piggin 	if (WARN_ON_ONCE(!size))
3248d70bec8cSNicholas Piggin 		return NULL;
3249d70bec8cSNicholas Piggin 
3250d70bec8cSNicholas Piggin 	if ((size >> PAGE_SHIFT) > totalram_pages()) {
3251d70bec8cSNicholas Piggin 		warn_alloc(gfp_mask, NULL,
3252f4bdfeafSUladzislau Rezki (Sony) 			"vmalloc error: size %lu, exceeds total pages",
3253f4bdfeafSUladzislau Rezki (Sony) 			real_size);
3254d70bec8cSNicholas Piggin 		return NULL;
3255121e6f32SNicholas Piggin 	}
3256d0a21265SDavid Rientjes 
3257559089e0SSong Liu 	if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3258121e6f32SNicholas Piggin 		unsigned long size_per_node;
3259121e6f32SNicholas Piggin 
3260121e6f32SNicholas Piggin 		/*
3261121e6f32SNicholas Piggin 		 * Try huge pages. Only try for PAGE_KERNEL allocations;
3262121e6f32SNicholas Piggin 		 * others, like modules, don't yet expect huge pages in
3263121e6f32SNicholas Piggin 		 * their allocations due to apply_to_page_range not
3264121e6f32SNicholas Piggin 		 * supporting them.
3265121e6f32SNicholas Piggin 		 */
3266121e6f32SNicholas Piggin 
3267121e6f32SNicholas Piggin 		size_per_node = size;
3268121e6f32SNicholas Piggin 		if (node == NUMA_NO_NODE)
3269121e6f32SNicholas Piggin 			size_per_node /= num_online_nodes();
32703382bbeeSChristophe Leroy 		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3271121e6f32SNicholas Piggin 			shift = PMD_SHIFT;
32723382bbeeSChristophe Leroy 		else
32733382bbeeSChristophe Leroy 			shift = arch_vmap_pte_supported_shift(size_per_node);
32743382bbeeSChristophe Leroy 
3275121e6f32SNicholas Piggin 		align = max(real_align, 1UL << shift);
3276121e6f32SNicholas Piggin 		size = ALIGN(real_size, 1UL << shift);
3277121e6f32SNicholas Piggin 	}
3278121e6f32SNicholas Piggin 
3279121e6f32SNicholas Piggin again:
32807ca3027bSDaniel Axtens 	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
32817ca3027bSDaniel Axtens 				  VM_UNINITIALIZED | vm_flags, start, end, node,
32827ca3027bSDaniel Axtens 				  gfp_mask, caller);
3283d70bec8cSNicholas Piggin 	if (!area) {
32849376130cSMichal Hocko 		bool nofail = gfp_mask & __GFP_NOFAIL;
3285d70bec8cSNicholas Piggin 		warn_alloc(gfp_mask, NULL,
32869376130cSMichal Hocko 			"vmalloc error: size %lu, vm_struct allocation failed%s",
32879376130cSMichal Hocko 			real_size, (nofail) ? ". Retrying." : "");
32889376130cSMichal Hocko 		if (nofail) {
32899376130cSMichal Hocko 			schedule_timeout_uninterruptible(1);
32909376130cSMichal Hocko 			goto again;
32919376130cSMichal Hocko 		}
3292de7d2b56SJoe Perches 		goto fail;
3293d70bec8cSNicholas Piggin 	}
3294d0a21265SDavid Rientjes 
3295f6e39794SAndrey Konovalov 	/*
3296f6e39794SAndrey Konovalov 	 * Prepare arguments for __vmalloc_area_node() and
3297f6e39794SAndrey Konovalov 	 * kasan_unpoison_vmalloc().
3298f6e39794SAndrey Konovalov 	 */
3299f6e39794SAndrey Konovalov 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3300f6e39794SAndrey Konovalov 		if (kasan_hw_tags_enabled()) {
330101d92c7fSAndrey Konovalov 			/*
330201d92c7fSAndrey Konovalov 			 * Modify protection bits to allow tagging.
3303f6e39794SAndrey Konovalov 			 * This must be done before mapping.
330401d92c7fSAndrey Konovalov 			 */
330501d92c7fSAndrey Konovalov 			prot = arch_vmap_pgprot_tagged(prot);
330601d92c7fSAndrey Konovalov 
330723689e91SAndrey Konovalov 			/*
3308f6e39794SAndrey Konovalov 			 * Skip page_alloc poisoning and zeroing for physical
3309f6e39794SAndrey Konovalov 			 * pages backing VM_ALLOC mapping. Memory is instead
3310f6e39794SAndrey Konovalov 			 * poisoned and zeroed by kasan_unpoison_vmalloc().
331123689e91SAndrey Konovalov 			 */
33120a54864fSPeter Collingbourne 			gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
331323689e91SAndrey Konovalov 		}
331423689e91SAndrey Konovalov 
3315f6e39794SAndrey Konovalov 		/* Take note that the mapping is PAGE_KERNEL. */
3316f6e39794SAndrey Konovalov 		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3317f6e39794SAndrey Konovalov 	}
3318f6e39794SAndrey Konovalov 
331901d92c7fSAndrey Konovalov 	/* Allocate physical pages and map them into vmalloc space. */
332019f1c3acSAndrey Konovalov 	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
332119f1c3acSAndrey Konovalov 	if (!ret)
3322121e6f32SNicholas Piggin 		goto fail;
332389219d37SCatalin Marinas 
332423689e91SAndrey Konovalov 	/*
332523689e91SAndrey Konovalov 	 * Mark the pages as accessible, now that they are mapped.
33266c2f761dSAndrey Konovalov 	 * The condition for setting KASAN_VMALLOC_INIT should complement the
33276c2f761dSAndrey Konovalov 	 * one in post_alloc_hook() with regard to the __GFP_SKIP_ZERO check
33286c2f761dSAndrey Konovalov 	 * to make sure that memory is initialized under the same conditions.
3329f6e39794SAndrey Konovalov 	 * Tag-based KASAN modes only assign tags to normal non-executable
3330f6e39794SAndrey Konovalov 	 * allocations, see __kasan_unpoison_vmalloc().
333123689e91SAndrey Konovalov 	 */
3332f6e39794SAndrey Konovalov 	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
33336c2f761dSAndrey Konovalov 	if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
33346c2f761dSAndrey Konovalov 	    (gfp_mask & __GFP_SKIP_ZERO))
333523689e91SAndrey Konovalov 		kasan_flags |= KASAN_VMALLOC_INIT;
3336f6e39794SAndrey Konovalov 	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
333723689e91SAndrey Konovalov 	area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
333819f1c3acSAndrey Konovalov 
333989219d37SCatalin Marinas 	/*
334020fc02b4SZhang Yanfei 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
334120fc02b4SZhang Yanfei 	 * flag. It means that vm_struct is not fully initialized.
33424341fa45SJoonsoo Kim 	 * Now, it is fully initialized, so remove this flag here.
3343f5252e00SMitsuo Hayasaka 	 */
334420fc02b4SZhang Yanfei 	clear_vm_uninitialized_flag(area);
3345f5252e00SMitsuo Hayasaka 
33467ca3027bSDaniel Axtens 	size = PAGE_ALIGN(size);
334760115fa5SKefeng Wang 	if (!(vm_flags & VM_DEFER_KMEMLEAK))
334894f4a161SCatalin Marinas 		kmemleak_vmalloc(area, size, gfp_mask);
334989219d37SCatalin Marinas 
335019f1c3acSAndrey Konovalov 	return area->addr;
3351de7d2b56SJoe Perches 
3352de7d2b56SJoe Perches fail:
3353121e6f32SNicholas Piggin 	if (shift > PAGE_SHIFT) {
3354121e6f32SNicholas Piggin 		shift = PAGE_SHIFT;
3355121e6f32SNicholas Piggin 		align = real_align;
3356121e6f32SNicholas Piggin 		size = real_size;
3357121e6f32SNicholas Piggin 		goto again;
3358121e6f32SNicholas Piggin 	}
3359121e6f32SNicholas Piggin 
3360de7d2b56SJoe Perches 	return NULL;
3361930fc45aSChristoph Lameter }
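/*
 * Editor's example (a sketch, not part of the original file): a direct
 * __vmalloc_node_range() call mirroring the wrappers below -- a zeroed
 * allocation in the regular vmalloc range, on any node.
 */
static __maybe_unused void *example_node_range_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
				    0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}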
3362930fc45aSChristoph Lameter 
33631da177e4SLinus Torvalds /**
3364930fc45aSChristoph Lameter  * __vmalloc_node - allocate virtually contiguous memory
33651da177e4SLinus Torvalds  * @size:	    allocation size
33662dca6999SDavid Miller  * @align:	    desired alignment
33671da177e4SLinus Torvalds  * @gfp_mask:	    flags for the page level allocator
336800ef2d2fSDavid Rientjes  * @node:	    node to use for allocation or NUMA_NO_NODE
3369c85d194bSRandy Dunlap  * @caller:	    caller's return address
33701da177e4SLinus Torvalds  *
3371f38fcb9cSChristoph Hellwig  * Allocate enough pages to cover @size from the page level allocator with
3372f38fcb9cSChristoph Hellwig  * @gfp_mask flags.  Map them into contiguous kernel virtual space.
3373a7c3e901SMichal Hocko  *
3374dcda9b04SMichal Hocko  * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3375a7c3e901SMichal Hocko  * and __GFP_NOFAIL are not supported
3376a7c3e901SMichal Hocko  *
3377a7c3e901SMichal Hocko  * Any use of gfp flags outside of GFP_KERNEL should be consulted
3378a7c3e901SMichal Hocko  * with mm people.
3379a862f68aSMike Rapoport  *
3380a862f68aSMike Rapoport  * Return: pointer to the allocated memory or %NULL on error
33811da177e4SLinus Torvalds  */
33822b905948SChristoph Hellwig void *__vmalloc_node(unsigned long size, unsigned long align,
3383f38fcb9cSChristoph Hellwig 			    gfp_t gfp_mask, int node, const void *caller)
33841da177e4SLinus Torvalds {
3385d0a21265SDavid Rientjes 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3386f38fcb9cSChristoph Hellwig 				gfp_mask, PAGE_KERNEL, 0, node, caller);
33871da177e4SLinus Torvalds }
3388c3f896dcSChristoph Hellwig /*
3389c3f896dcSChristoph Hellwig  * This is only for performance analysis and stress testing of vmalloc.
3390c3f896dcSChristoph Hellwig  * It is required by the vmalloc test module; do not use it for anything
3391c3f896dcSChristoph Hellwig  * else.
3392c3f896dcSChristoph Hellwig  */
3393c3f896dcSChristoph Hellwig #ifdef CONFIG_TEST_VMALLOC_MODULE
3394c3f896dcSChristoph Hellwig EXPORT_SYMBOL_GPL(__vmalloc_node);
3395c3f896dcSChristoph Hellwig #endif
33961da177e4SLinus Torvalds 
339788dca4caSChristoph Hellwig void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3398930fc45aSChristoph Lameter {
3399f38fcb9cSChristoph Hellwig 	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
340023016969SChristoph Lameter 				__builtin_return_address(0));
3401930fc45aSChristoph Lameter }
34021da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc);
34031da177e4SLinus Torvalds 
34041da177e4SLinus Torvalds /**
34051da177e4SLinus Torvalds  * vmalloc - allocate virtually contiguous memory
34061da177e4SLinus Torvalds  * @size:    allocation size
340792eac168SMike Rapoport  *
34081da177e4SLinus Torvalds  * Allocate enough pages to cover @size from the page level
34091da177e4SLinus Torvalds  * allocator and map them into contiguous kernel virtual space.
34101da177e4SLinus Torvalds  *
3411c1c8897fSMichael Opdenacker  * For tight control over page level allocator and protection flags
34121da177e4SLinus Torvalds  * use __vmalloc() instead.
3413a862f68aSMike Rapoport  *
3414a862f68aSMike Rapoport  * Return: pointer to the allocated memory or %NULL on error
34151da177e4SLinus Torvalds  */
34161da177e4SLinus Torvalds void *vmalloc(unsigned long size)
34171da177e4SLinus Torvalds {
34184d39d728SChristoph Hellwig 	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
34194d39d728SChristoph Hellwig 				__builtin_return_address(0));
34201da177e4SLinus Torvalds }
34211da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc);
34221da177e4SLinus Torvalds 
3423930fc45aSChristoph Lameter /**
3424559089e0SSong Liu  * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
342515a64f5aSClaudio Imbrenda  * @size:      allocation size
3426559089e0SSong Liu  * @gfp_mask:  flags for the page level allocator
342715a64f5aSClaudio Imbrenda  *
3428559089e0SSong Liu  * Allocate enough pages to cover @size from the page level
342915a64f5aSClaudio Imbrenda  * allocator and map them into contiguous kernel virtual space.
3430559089e0SSong Liu  * If @size is greater than or equal to PMD_SIZE, allow using
3431559089e0SSong Liu  * huge pages for the memory.
343215a64f5aSClaudio Imbrenda  *
343315a64f5aSClaudio Imbrenda  * Return: pointer to the allocated memory or %NULL on error
343415a64f5aSClaudio Imbrenda  */
3435559089e0SSong Liu void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
343615a64f5aSClaudio Imbrenda {
343715a64f5aSClaudio Imbrenda 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3438559089e0SSong Liu 				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
343915a64f5aSClaudio Imbrenda 				    NUMA_NO_NODE, __builtin_return_address(0));
344015a64f5aSClaudio Imbrenda }
3441559089e0SSong Liu EXPORT_SYMBOL_GPL(vmalloc_huge);
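/*
 * Editor's example (a sketch; the 16 MiB size is an arbitrary
 * illustration): a large, long-lived table where PMD-sized mappings can
 * reduce TLB pressure whenever the architecture supports them.
 */
static __maybe_unused void *example_huge_table(void)
{
	return vmalloc_huge(16UL << 20, GFP_KERNEL | __GFP_ZERO);
}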
344215a64f5aSClaudio Imbrenda 
344315a64f5aSClaudio Imbrenda /**
3444e1ca7788SDave Young  * vzalloc - allocate virtually contiguous memory with zero fill
3445e1ca7788SDave Young  * @size:    allocation size
344692eac168SMike Rapoport  *
3447e1ca7788SDave Young  * Allocate enough pages to cover @size from the page level
3448e1ca7788SDave Young  * allocator and map them into contiguous kernel virtual space.
3449e1ca7788SDave Young  * The memory allocated is set to zero.
3450e1ca7788SDave Young  *
3451e1ca7788SDave Young  * For tight control over page level allocator and protection flags
3452e1ca7788SDave Young  * use __vmalloc() instead.
3453a862f68aSMike Rapoport  *
3454a862f68aSMike Rapoport  * Return: pointer to the allocated memory or %NULL on error
3455e1ca7788SDave Young  */
3456e1ca7788SDave Young void *vzalloc(unsigned long size)
3457e1ca7788SDave Young {
34584d39d728SChristoph Hellwig 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
34594d39d728SChristoph Hellwig 				__builtin_return_address(0));
3460e1ca7788SDave Young }
3461e1ca7788SDave Young EXPORT_SYMBOL(vzalloc);
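/*
 * Editor's example (a sketch, not part of the original file): the common
 * vzalloc()/vfree() pattern for a table too large for kmalloc(). The
 * element type and count are hypothetical.
 */
static __maybe_unused int example_vzalloc_table(size_t nents)
{
	u64 *tbl = vzalloc(array_size(nents, sizeof(*tbl)));

	if (!tbl)
		return -ENOMEM;

	/* ... entries start out zeroed; populate and use the table ... */

	vfree(tbl);
	return 0;
}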
3462e1ca7788SDave Young 
3463e1ca7788SDave Young /**
3464ead04089SRolf Eike Beer  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
346583342314SNick Piggin  * @size: allocation size
3466ead04089SRolf Eike Beer  *
3467ead04089SRolf Eike Beer  * The resulting memory area is zeroed so it can be mapped to userspace
3468ead04089SRolf Eike Beer  * without leaking data.
3469a862f68aSMike Rapoport  *
3470a862f68aSMike Rapoport  * Return: pointer to the allocated memory or %NULL on error
347183342314SNick Piggin  */
347283342314SNick Piggin void *vmalloc_user(unsigned long size)
347383342314SNick Piggin {
3474bc84c535SRoman Penyaev 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3475bc84c535SRoman Penyaev 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3476bc84c535SRoman Penyaev 				    VM_USERMAP, NUMA_NO_NODE,
347700ef2d2fSDavid Rientjes 				    __builtin_return_address(0));
347883342314SNick Piggin }
347983342314SNick Piggin EXPORT_SYMBOL(vmalloc_user);
348083342314SNick Piggin 
348183342314SNick Piggin /**
3482930fc45aSChristoph Lameter  * vmalloc_node - allocate memory on a specific node
3483930fc45aSChristoph Lameter  * @size:	  allocation size
3484d44e0780SRandy Dunlap  * @node:	  numa node
3485930fc45aSChristoph Lameter  *
3486930fc45aSChristoph Lameter  * Allocate enough pages to cover @size from the page level
3487930fc45aSChristoph Lameter  * allocator and map them into contiguous kernel virtual space.
3488930fc45aSChristoph Lameter  *
3489c1c8897fSMichael Opdenacker  * For tight control over page level allocator and protection flags
3490930fc45aSChristoph Lameter  * use __vmalloc() instead.
3491a862f68aSMike Rapoport  *
3492a862f68aSMike Rapoport  * Return: pointer to the allocated memory or %NULL on error
3493930fc45aSChristoph Lameter  */
3494930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node)
3495930fc45aSChristoph Lameter {
3496f38fcb9cSChristoph Hellwig 	return __vmalloc_node(size, 1, GFP_KERNEL, node,
3497f38fcb9cSChristoph Hellwig 			__builtin_return_address(0));
3498930fc45aSChristoph Lameter }
3499930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node);
3500930fc45aSChristoph Lameter 
3501e1ca7788SDave Young /**
3502e1ca7788SDave Young  * vzalloc_node - allocate memory on a specific node with zero fill
3503e1ca7788SDave Young  * @size:	allocation size
3504e1ca7788SDave Young  * @node:	numa node
3505e1ca7788SDave Young  *
3506e1ca7788SDave Young  * Allocate enough pages to cover @size from the page level
3507e1ca7788SDave Young  * allocator and map them into contiguous kernel virtual space.
3508e1ca7788SDave Young  * The memory allocated is set to zero.
3509e1ca7788SDave Young  *
3510a862f68aSMike Rapoport  * Return: pointer to the allocated memory or %NULL on error
3511e1ca7788SDave Young  */
3512e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node)
3513e1ca7788SDave Young {
35144d39d728SChristoph Hellwig 	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
35154d39d728SChristoph Hellwig 				__builtin_return_address(0));
3516e1ca7788SDave Young }
3517e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node);
3518e1ca7788SDave Young 
35190d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3520698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
35210d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3522698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
35230d08e0d3SAndi Kleen #else
3524698d0831SMichal Hocko /*
3525698d0831SMichal Hocko  * 64b systems should always have either DMA or DMA32 zones. For others
3526698d0831SMichal Hocko  * GFP_DMA32 should do the right thing and use the normal zone.
3527698d0831SMichal Hocko  */
352868d68ff6SZhiyuan Dai #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
35290d08e0d3SAndi Kleen #endif
35300d08e0d3SAndi Kleen 
35311da177e4SLinus Torvalds /**
35321da177e4SLinus Torvalds  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
35331da177e4SLinus Torvalds  * @size:	allocation size
35341da177e4SLinus Torvalds  *
35351da177e4SLinus Torvalds  * Allocate enough 32bit PA addressable pages to cover @size from the
35361da177e4SLinus Torvalds  * page level allocator and map them into contiguous kernel virtual space.
3537a862f68aSMike Rapoport  *
3538a862f68aSMike Rapoport  * Return: pointer to the allocated memory or %NULL on error
35391da177e4SLinus Torvalds  */
35401da177e4SLinus Torvalds void *vmalloc_32(unsigned long size)
35411da177e4SLinus Torvalds {
3542f38fcb9cSChristoph Hellwig 	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3543f38fcb9cSChristoph Hellwig 			__builtin_return_address(0));
35441da177e4SLinus Torvalds }
35451da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32);
35461da177e4SLinus Torvalds 
354783342314SNick Piggin /**
3548ead04089SRolf Eike Beer  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
354983342314SNick Piggin  * @size:	     allocation size
3550ead04089SRolf Eike Beer  *
3551ead04089SRolf Eike Beer  * The resulting memory area is 32bit addressable and zeroed so it can be
3552ead04089SRolf Eike Beer  * mapped to userspace without leaking data.
3553a862f68aSMike Rapoport  *
3554a862f68aSMike Rapoport  * Return: pointer to the allocated memory or %NULL on error
355583342314SNick Piggin  */
355683342314SNick Piggin void *vmalloc_32_user(unsigned long size)
355783342314SNick Piggin {
3558bc84c535SRoman Penyaev 	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3559bc84c535SRoman Penyaev 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3560bc84c535SRoman Penyaev 				    VM_USERMAP, NUMA_NO_NODE,
35615a82ac71SRoman Penyaev 				    __builtin_return_address(0));
356283342314SNick Piggin }
356383342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user);
356483342314SNick Piggin 
3565d0107eb0SKAMEZAWA Hiroyuki /*
35664c91c07cSLorenzo Stoakes  * Atomically zero bytes in the iterator.
35674c91c07cSLorenzo Stoakes  *
35684c91c07cSLorenzo Stoakes  * Returns the number of zeroed bytes.
3569d0107eb0SKAMEZAWA Hiroyuki  */
35704c91c07cSLorenzo Stoakes static size_t zero_iter(struct iov_iter *iter, size_t count)
3571d0107eb0SKAMEZAWA Hiroyuki {
35724c91c07cSLorenzo Stoakes 	size_t remains = count;
3573d0107eb0SKAMEZAWA Hiroyuki 
35744c91c07cSLorenzo Stoakes 	while (remains > 0) {
35754c91c07cSLorenzo Stoakes 		size_t num, copied;
35764c91c07cSLorenzo Stoakes 
35770e4bc271SLu Hongfei 		num = min_t(size_t, remains, PAGE_SIZE);
35784c91c07cSLorenzo Stoakes 		copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
35794c91c07cSLorenzo Stoakes 		remains -= copied;
35804c91c07cSLorenzo Stoakes 
35814c91c07cSLorenzo Stoakes 		if (copied < num)
35824c91c07cSLorenzo Stoakes 			break;
35834c91c07cSLorenzo Stoakes 	}
35844c91c07cSLorenzo Stoakes 
35854c91c07cSLorenzo Stoakes 	return count - remains;
35864c91c07cSLorenzo Stoakes }
35874c91c07cSLorenzo Stoakes 
35884c91c07cSLorenzo Stoakes /*
35894c91c07cSLorenzo Stoakes  * Small helper routine: copy contents from addr to the iterator.
35904c91c07cSLorenzo Stoakes  * If a page is not present, zero-fill it.
35914c91c07cSLorenzo Stoakes  *
35924c91c07cSLorenzo Stoakes  * Returns the number of copied bytes.
35934c91c07cSLorenzo Stoakes  */
35944c91c07cSLorenzo Stoakes static size_t aligned_vread_iter(struct iov_iter *iter,
35954c91c07cSLorenzo Stoakes 				 const char *addr, size_t count)
35964c91c07cSLorenzo Stoakes {
35974c91c07cSLorenzo Stoakes 	size_t remains = count;
35984c91c07cSLorenzo Stoakes 	struct page *page;
35994c91c07cSLorenzo Stoakes 
36004c91c07cSLorenzo Stoakes 	while (remains > 0) {
3601d0107eb0SKAMEZAWA Hiroyuki 		unsigned long offset, length;
36024c91c07cSLorenzo Stoakes 		size_t copied = 0;
3603d0107eb0SKAMEZAWA Hiroyuki 
3604891c49abSAlexander Kuleshov 		offset = offset_in_page(addr);
3605d0107eb0SKAMEZAWA Hiroyuki 		length = PAGE_SIZE - offset;
36064c91c07cSLorenzo Stoakes 		if (length > remains)
36074c91c07cSLorenzo Stoakes 			length = remains;
36084c91c07cSLorenzo Stoakes 		page = vmalloc_to_page(addr);
3609d0107eb0SKAMEZAWA Hiroyuki 		/*
36104c91c07cSLorenzo Stoakes 		 * To access this _mapped_ area safely, we would need a lock.
36114c91c07cSLorenzo Stoakes 		 * But taking a lock here would add overhead to the
36124c91c07cSLorenzo Stoakes 		 * vmalloc()/vfree() calls for the sake of this rarely used
36134c91c07cSLorenzo Stoakes 		 * _debug_ interface. Instead, we use a local mapping via
36144c91c07cSLorenzo Stoakes 		 * copy_page_to_iter_nofault() and accept a small overhead in
36154c91c07cSLorenzo Stoakes 		 * this access function.
3616d0107eb0SKAMEZAWA Hiroyuki 		 */
36174c91c07cSLorenzo Stoakes 		if (page)
36184c91c07cSLorenzo Stoakes 			copied = copy_page_to_iter_nofault(page, offset,
36194c91c07cSLorenzo Stoakes 							   length, iter);
36204c91c07cSLorenzo Stoakes 		else
36214c91c07cSLorenzo Stoakes 			copied = zero_iter(iter, length);
3622d0107eb0SKAMEZAWA Hiroyuki 
36234c91c07cSLorenzo Stoakes 		addr += copied;
36244c91c07cSLorenzo Stoakes 		remains -= copied;
36254c91c07cSLorenzo Stoakes 
36264c91c07cSLorenzo Stoakes 		if (copied != length)
36274c91c07cSLorenzo Stoakes 			break;
3628d0107eb0SKAMEZAWA Hiroyuki 	}
3629d0107eb0SKAMEZAWA Hiroyuki 
36304c91c07cSLorenzo Stoakes 	return count - remains;
36314c91c07cSLorenzo Stoakes }
36324c91c07cSLorenzo Stoakes 
36334c91c07cSLorenzo Stoakes /*
36344c91c07cSLorenzo Stoakes  * Read from a vm_map_ram region of memory.
36354c91c07cSLorenzo Stoakes  *
36364c91c07cSLorenzo Stoakes  * Returns the number of copied bytes.
36374c91c07cSLorenzo Stoakes  */
36384c91c07cSLorenzo Stoakes static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
36394c91c07cSLorenzo Stoakes 				  size_t count, unsigned long flags)
364006c89946SBaoquan He {
364106c89946SBaoquan He 	char *start;
364206c89946SBaoquan He 	struct vmap_block *vb;
3643062eacf5SUladzislau Rezki (Sony) 	struct xarray *xa;
364406c89946SBaoquan He 	unsigned long offset;
36454c91c07cSLorenzo Stoakes 	unsigned int rs, re;
36464c91c07cSLorenzo Stoakes 	size_t remains, n;
364706c89946SBaoquan He 
364806c89946SBaoquan He 	/*
364906c89946SBaoquan He 	 * If it's an area created directly by the vm_map_ram() interface,
365006c89946SBaoquan He 	 * without further subdivision and delegation to vmap_block,
365106c89946SBaoquan He 	 * handle it here.
365206c89946SBaoquan He 	 */
36534c91c07cSLorenzo Stoakes 	if (!(flags & VMAP_BLOCK))
36544c91c07cSLorenzo Stoakes 		return aligned_vread_iter(iter, addr, count);
36554c91c07cSLorenzo Stoakes 
36564c91c07cSLorenzo Stoakes 	remains = count;
365706c89946SBaoquan He 
365806c89946SBaoquan He 	/*
365906c89946SBaoquan He 	 * The area is split into regions and tracked with vmap_block; read
366006c89946SBaoquan He 	 * out each region and zero-fill the holes between regions.
366106c89946SBaoquan He 	 */
3662fa1c77c1SUladzislau Rezki (Sony) 	xa = addr_to_vb_xa((unsigned long) addr);
3663062eacf5SUladzislau Rezki (Sony) 	vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
366406c89946SBaoquan He 	if (!vb)
36654c91c07cSLorenzo Stoakes 		goto finished_zero;
366606c89946SBaoquan He 
366706c89946SBaoquan He 	spin_lock(&vb->lock);
366806c89946SBaoquan He 	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
366906c89946SBaoquan He 		spin_unlock(&vb->lock);
36704c91c07cSLorenzo Stoakes 		goto finished_zero;
36714c91c07cSLorenzo Stoakes 	}
36724c91c07cSLorenzo Stoakes 
36734c91c07cSLorenzo Stoakes 	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
36744c91c07cSLorenzo Stoakes 		size_t copied;
36754c91c07cSLorenzo Stoakes 
36764c91c07cSLorenzo Stoakes 		if (remains == 0)
36774c91c07cSLorenzo Stoakes 			goto finished;
36784c91c07cSLorenzo Stoakes 
36794c91c07cSLorenzo Stoakes 		start = vmap_block_vaddr(vb->va->va_start, rs);
36804c91c07cSLorenzo Stoakes 
36814c91c07cSLorenzo Stoakes 		if (addr < start) {
36824c91c07cSLorenzo Stoakes 			size_t to_zero = min_t(size_t, start - addr, remains);
36834c91c07cSLorenzo Stoakes 			size_t zeroed = zero_iter(iter, to_zero);
36844c91c07cSLorenzo Stoakes 
36854c91c07cSLorenzo Stoakes 			addr += zeroed;
36864c91c07cSLorenzo Stoakes 			remains -= zeroed;
36874c91c07cSLorenzo Stoakes 
36884c91c07cSLorenzo Stoakes 			if (remains == 0 || zeroed != to_zero)
368906c89946SBaoquan He 				goto finished;
369006c89946SBaoquan He 		}
36914c91c07cSLorenzo Stoakes 
369206c89946SBaoquan He 		/* it could start reading from the middle of a used region */
369306c89946SBaoquan He 		offset = offset_in_page(addr);
369406c89946SBaoquan He 		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
36954c91c07cSLorenzo Stoakes 		if (n > remains)
36964c91c07cSLorenzo Stoakes 			n = remains;
369706c89946SBaoquan He 
36984c91c07cSLorenzo Stoakes 		copied = aligned_vread_iter(iter, start + offset, n);
36994c91c07cSLorenzo Stoakes 
37004c91c07cSLorenzo Stoakes 		addr += copied;
37014c91c07cSLorenzo Stoakes 		remains -= copied;
37024c91c07cSLorenzo Stoakes 
37034c91c07cSLorenzo Stoakes 		if (copied != n)
37044c91c07cSLorenzo Stoakes 			goto finished;
370506c89946SBaoquan He 	}
37064c91c07cSLorenzo Stoakes 
370706c89946SBaoquan He 	spin_unlock(&vb->lock);
370806c89946SBaoquan He 
37094c91c07cSLorenzo Stoakes finished_zero:
371006c89946SBaoquan He 	/* zero-fill the remaining dirty or free regions */
37114c91c07cSLorenzo Stoakes 	return count - remains + zero_iter(iter, remains);
37124c91c07cSLorenzo Stoakes finished:
37134c91c07cSLorenzo Stoakes 	/* We couldn't copy/zero everything */
37144c91c07cSLorenzo Stoakes 	spin_unlock(&vb->lock);
37154c91c07cSLorenzo Stoakes 	return count - remains;
371606c89946SBaoquan He }
371706c89946SBaoquan He 
3718d0107eb0SKAMEZAWA Hiroyuki /**
37194c91c07cSLorenzo Stoakes  * vread_iter() - read vmalloc area in a safe way to an iterator.
37204c91c07cSLorenzo Stoakes  * @iter:         the iterator to which data should be written.
3721d0107eb0SKAMEZAWA Hiroyuki  * @addr:         vm address.
3722d0107eb0SKAMEZAWA Hiroyuki  * @count:        number of bytes to be read.
3723d0107eb0SKAMEZAWA Hiroyuki  *
3724d0107eb0SKAMEZAWA Hiroyuki  * This function checks that addr is a valid vmalloc'ed area, and
3725d0107eb0SKAMEZAWA Hiroyuki  * copies data from that area to the given iterator. If the given memory
3726d0107eb0SKAMEZAWA Hiroyuki  * range of [addr...addr+count) includes some valid address, data is
3727d0107eb0SKAMEZAWA Hiroyuki  * copied to @iter. If there are memory holes, they'll be zero-filled.
3728d0107eb0SKAMEZAWA Hiroyuki  * IOREMAP areas are treated as memory holes and no copy is done.
3729d0107eb0SKAMEZAWA Hiroyuki  *
3730d0107eb0SKAMEZAWA Hiroyuki  * If [addr...addr+count) doesn't include any intersection with an alive
3731a8e5202dSCong Wang  * vm_struct area, 0 is returned.
3732d0107eb0SKAMEZAWA Hiroyuki  *
3733d0107eb0SKAMEZAWA Hiroyuki  * Note: In usual ops, vread_iter() is never necessary because the caller
3734d0107eb0SKAMEZAWA Hiroyuki  * should know the vmalloc() area is valid and can use memcpy().
3735d0107eb0SKAMEZAWA Hiroyuki  * This is for routines which have to access the vmalloc area without
3736bbcd53c9SDavid Hildenbrand  * any information, such as /proc/kcore.
3737a862f68aSMike Rapoport  *
3738a862f68aSMike Rapoport  * Return: number of bytes for which addr and the iterator should be
3739a862f68aSMike Rapoport  * increased (same number as @count) or %0 if [addr...addr+count)
3740a862f68aSMike Rapoport  * doesn't include any intersection with a valid vmalloc area
3741d0107eb0SKAMEZAWA Hiroyuki  */
37424c91c07cSLorenzo Stoakes long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
37431da177e4SLinus Torvalds {
3744e81ce85fSJoonsoo Kim 	struct vmap_area *va;
3745e81ce85fSJoonsoo Kim 	struct vm_struct *vm;
37464c91c07cSLorenzo Stoakes 	char *vaddr;
37474c91c07cSLorenzo Stoakes 	size_t n, size, flags, remains;
37481da177e4SLinus Torvalds 
37494aff1dc4SAndrey Konovalov 	addr = kasan_reset_tag(addr);
37504aff1dc4SAndrey Konovalov 
37511da177e4SLinus Torvalds 	/* Don't allow overflow */
37521da177e4SLinus Torvalds 	if ((unsigned long) addr + count < count)
37531da177e4SLinus Torvalds 		count = -(unsigned long) addr;
37541da177e4SLinus Torvalds 
37554c91c07cSLorenzo Stoakes 	remains = count;
37564c91c07cSLorenzo Stoakes 
3757e81ce85fSJoonsoo Kim 	spin_lock(&vmap_area_lock);
3758f181234aSChen Wandun 	va = find_vmap_area_exceed_addr((unsigned long)addr);
3759f608788cSSerapheim Dimitropoulos 	if (!va)
37604c91c07cSLorenzo Stoakes 		goto finished_zero;
3761f181234aSChen Wandun 
3762f181234aSChen Wandun 	/* no intersects with alive vmap_area */
37634c91c07cSLorenzo Stoakes 	if ((unsigned long)addr + remains <= va->va_start)
37644c91c07cSLorenzo Stoakes 		goto finished_zero;
3765f181234aSChen Wandun 
3766f608788cSSerapheim Dimitropoulos 	list_for_each_entry_from(va, &vmap_area_list, list) {
37674c91c07cSLorenzo Stoakes 		size_t copied;
37684c91c07cSLorenzo Stoakes 
37694c91c07cSLorenzo Stoakes 		if (remains == 0)
37704c91c07cSLorenzo Stoakes 			goto finished;
3771e81ce85fSJoonsoo Kim 
377206c89946SBaoquan He 		vm = va->vm;
377306c89946SBaoquan He 		flags = va->flags & VMAP_FLAGS_MASK;
377406c89946SBaoquan He 		/*
377506c89946SBaoquan He 		 * VMAP_BLOCK indicates a sub-type of vm_map_ram area; it
377606c89946SBaoquan He 		 * needs to be set together with VMAP_RAM.
377706c89946SBaoquan He 		 */
377806c89946SBaoquan He 		WARN_ON(flags == VMAP_BLOCK);
377906c89946SBaoquan He 
378006c89946SBaoquan He 		if (!vm && !flags)
3781e81ce85fSJoonsoo Kim 			continue;
3782e81ce85fSJoonsoo Kim 
378330a7a9b1SBaoquan He 		if (vm && (vm->flags & VM_UNINITIALIZED))
378430a7a9b1SBaoquan He 			continue;
37854c91c07cSLorenzo Stoakes 
378630a7a9b1SBaoquan He 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
378730a7a9b1SBaoquan He 		smp_rmb();
378830a7a9b1SBaoquan He 
378906c89946SBaoquan He 		vaddr = (char *) va->va_start;
379006c89946SBaoquan He 		size = vm ? get_vm_area_size(vm) : va_size(va);
379106c89946SBaoquan He 
379206c89946SBaoquan He 		if (addr >= vaddr + size)
37931da177e4SLinus Torvalds 			continue;
37944c91c07cSLorenzo Stoakes 
37954c91c07cSLorenzo Stoakes 		if (addr < vaddr) {
37964c91c07cSLorenzo Stoakes 			size_t to_zero = min_t(size_t, vaddr - addr, remains);
37974c91c07cSLorenzo Stoakes 			size_t zeroed = zero_iter(iter, to_zero);
37984c91c07cSLorenzo Stoakes 
37994c91c07cSLorenzo Stoakes 			addr += zeroed;
38004c91c07cSLorenzo Stoakes 			remains -= zeroed;
38014c91c07cSLorenzo Stoakes 
38024c91c07cSLorenzo Stoakes 			if (remains == 0 || zeroed != to_zero)
38031da177e4SLinus Torvalds 				goto finished;
38041da177e4SLinus Torvalds 		}
38054c91c07cSLorenzo Stoakes 
380606c89946SBaoquan He 		n = vaddr + size - addr;
38074c91c07cSLorenzo Stoakes 		if (n > remains)
38084c91c07cSLorenzo Stoakes 			n = remains;
380906c89946SBaoquan He 
381006c89946SBaoquan He 		if (flags & VMAP_RAM)
38114c91c07cSLorenzo Stoakes 			copied = vmap_ram_vread_iter(iter, addr, n, flags);
381206c89946SBaoquan He 		else if (!(vm->flags & VM_IOREMAP))
38134c91c07cSLorenzo Stoakes 			copied = aligned_vread_iter(iter, addr, n);
3814d0107eb0SKAMEZAWA Hiroyuki 		else /* IOREMAP area is treated as memory hole */
38154c91c07cSLorenzo Stoakes 			copied = zero_iter(iter, n);
38164c91c07cSLorenzo Stoakes 
38174c91c07cSLorenzo Stoakes 		addr += copied;
38184c91c07cSLorenzo Stoakes 		remains -= copied;
38194c91c07cSLorenzo Stoakes 
38204c91c07cSLorenzo Stoakes 		if (copied != n)
38214c91c07cSLorenzo Stoakes 			goto finished;
38221da177e4SLinus Torvalds 	}
38234c91c07cSLorenzo Stoakes 
38244c91c07cSLorenzo Stoakes finished_zero:
38254c91c07cSLorenzo Stoakes 	spin_unlock(&vmap_area_lock);
38264c91c07cSLorenzo Stoakes 	/* zero-fill memory holes */
38274c91c07cSLorenzo Stoakes 	return count - remains + zero_iter(iter, remains);
38281da177e4SLinus Torvalds finished:
38294c91c07cSLorenzo Stoakes 	/* Nothing remains, or we couldn't copy/zero everything. */
3830e81ce85fSJoonsoo Kim 	spin_unlock(&vmap_area_lock);
3831d0107eb0SKAMEZAWA Hiroyuki 
38324c91c07cSLorenzo Stoakes 	return count - remains;
38331da177e4SLinus Torvalds }
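/*
 * Editor's example (a sketch, not part of the original file): reading a
 * vmalloc range into a kernel buffer through an iov_iter, the way a
 * /proc/kcore-style reader would drive vread_iter().
 */
static __maybe_unused long example_vread(void *dst, const char *vaddr,
					 size_t len)
{
	struct kvec kv = { .iov_base = dst, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);

	/* holes and IOREMAP ranges come back zero-filled */
	return vread_iter(&iter, vaddr, len);
}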
38341da177e4SLinus Torvalds 
3835d0107eb0SKAMEZAWA Hiroyuki /**
3836e69e9d4aSHATAYAMA Daisuke  * remap_vmalloc_range_partial - map vmalloc pages to userspace
3837e69e9d4aSHATAYAMA Daisuke  * @vma:		vma to cover
3838e69e9d4aSHATAYAMA Daisuke  * @uaddr:		target user address to start at
3839e69e9d4aSHATAYAMA Daisuke  * @kaddr:		virtual address of vmalloc kernel memory
3840bdebd6a2SJann Horn  * @pgoff:		offset from @kaddr to start at
3841e69e9d4aSHATAYAMA Daisuke  * @size:		size of map area
3842e69e9d4aSHATAYAMA Daisuke  *
3843e69e9d4aSHATAYAMA Daisuke  * Returns:	0 for success, -Exxx on failure
3844e69e9d4aSHATAYAMA Daisuke  *
3845e69e9d4aSHATAYAMA Daisuke  * This function checks that @kaddr is a valid vmalloc'ed area,
3846e69e9d4aSHATAYAMA Daisuke  * and that it is big enough to cover the range starting at
3847e69e9d4aSHATAYAMA Daisuke  * @uaddr in @vma. Will return failure if that criterion isn't
3848e69e9d4aSHATAYAMA Daisuke  * met.
3849e69e9d4aSHATAYAMA Daisuke  *
3850e69e9d4aSHATAYAMA Daisuke  * Similar to remap_pfn_range() (see mm/memory.c)
3851e69e9d4aSHATAYAMA Daisuke  */
3852e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3853bdebd6a2SJann Horn 				void *kaddr, unsigned long pgoff,
3854bdebd6a2SJann Horn 				unsigned long size)
3855e69e9d4aSHATAYAMA Daisuke {
3856e69e9d4aSHATAYAMA Daisuke 	struct vm_struct *area;
3857bdebd6a2SJann Horn 	unsigned long off;
3858bdebd6a2SJann Horn 	unsigned long end_index;
3859bdebd6a2SJann Horn 
3860bdebd6a2SJann Horn 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3861bdebd6a2SJann Horn 		return -EINVAL;
3862e69e9d4aSHATAYAMA Daisuke 
3863e69e9d4aSHATAYAMA Daisuke 	size = PAGE_ALIGN(size);
3864e69e9d4aSHATAYAMA Daisuke 
3865e69e9d4aSHATAYAMA Daisuke 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3866e69e9d4aSHATAYAMA Daisuke 		return -EINVAL;
3867e69e9d4aSHATAYAMA Daisuke 
3868e69e9d4aSHATAYAMA Daisuke 	area = find_vm_area(kaddr);
3869e69e9d4aSHATAYAMA Daisuke 	if (!area)
3870e69e9d4aSHATAYAMA Daisuke 		return -EINVAL;
3871e69e9d4aSHATAYAMA Daisuke 
3872fe9041c2SChristoph Hellwig 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3873e69e9d4aSHATAYAMA Daisuke 		return -EINVAL;
3874e69e9d4aSHATAYAMA Daisuke 
3875bdebd6a2SJann Horn 	if (check_add_overflow(size, off, &end_index) ||
3876bdebd6a2SJann Horn 	    end_index > get_vm_area_size(area))
3877e69e9d4aSHATAYAMA Daisuke 		return -EINVAL;
3878bdebd6a2SJann Horn 	kaddr += off;
3879e69e9d4aSHATAYAMA Daisuke 
3880e69e9d4aSHATAYAMA Daisuke 	do {
3881e69e9d4aSHATAYAMA Daisuke 		struct page *page = vmalloc_to_page(kaddr);
3882e69e9d4aSHATAYAMA Daisuke 		int ret;
3883e69e9d4aSHATAYAMA Daisuke 
3884e69e9d4aSHATAYAMA Daisuke 		ret = vm_insert_page(vma, uaddr, page);
3885e69e9d4aSHATAYAMA Daisuke 		if (ret)
3886e69e9d4aSHATAYAMA Daisuke 			return ret;
3887e69e9d4aSHATAYAMA Daisuke 
3888e69e9d4aSHATAYAMA Daisuke 		uaddr += PAGE_SIZE;
3889e69e9d4aSHATAYAMA Daisuke 		kaddr += PAGE_SIZE;
3890e69e9d4aSHATAYAMA Daisuke 		size -= PAGE_SIZE;
3891e69e9d4aSHATAYAMA Daisuke 	} while (size > 0);
3892e69e9d4aSHATAYAMA Daisuke 
38931c71222eSSuren Baghdasaryan 	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
3894e69e9d4aSHATAYAMA Daisuke 
3895e69e9d4aSHATAYAMA Daisuke 	return 0;
3896e69e9d4aSHATAYAMA Daisuke }
3897e69e9d4aSHATAYAMA Daisuke 
3898e69e9d4aSHATAYAMA Daisuke /**
389983342314SNick Piggin  * remap_vmalloc_range - map vmalloc pages to userspace
390083342314SNick Piggin  * @vma:		vma to cover (map full range of vma)
390183342314SNick Piggin  * @addr:		vmalloc memory
390283342314SNick Piggin  * @pgoff:		number of pages into addr before first page to map
39037682486bSRandy Dunlap  *
39047682486bSRandy Dunlap  * Returns:	0 for success, -Exxx on failure
390583342314SNick Piggin  *
390683342314SNick Piggin  * This function checks that addr is a valid vmalloc'ed area, and
390783342314SNick Piggin  * that it is big enough to cover the vma. Will return failure if
390883342314SNick Piggin  * that criterion isn't met.
390983342314SNick Piggin  *
391072fd4a35SRobert P. J. Day  * Similar to remap_pfn_range() (see mm/memory.c)
391183342314SNick Piggin  */
391283342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
391383342314SNick Piggin 						unsigned long pgoff)
391483342314SNick Piggin {
3915e69e9d4aSHATAYAMA Daisuke 	return remap_vmalloc_range_partial(vma, vma->vm_start,
3916bdebd6a2SJann Horn 					   addr, pgoff,
3917e69e9d4aSHATAYAMA Daisuke 					   vma->vm_end - vma->vm_start);
391883342314SNick Piggin }
391983342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range);
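/*
 * Editor's example (a sketch, not part of the original file): the usual
 * pairing of vmalloc_user() with remap_vmalloc_range() in a driver's
 * mmap handler. The example_dev type is hypothetical.
 */
struct example_dev {
	void *buf;	/* from vmalloc_user(), so VM_USERMAP is set */
};

static __maybe_unused int example_mmap(struct example_dev *dev,
				       struct vm_area_struct *vma)
{
	/* fails with -EINVAL if buf lacks VM_USERMAP or is too small */
	return remap_vmalloc_range(vma, dev->buf, 0);
}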
392083342314SNick Piggin 
39215f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area)
39225f4352fbSJeremy Fitzhardinge {
39235f4352fbSJeremy Fitzhardinge 	struct vm_struct *ret;
39245f4352fbSJeremy Fitzhardinge 	ret = remove_vm_area(area->addr);
39255f4352fbSJeremy Fitzhardinge 	BUG_ON(ret != area);
39265f4352fbSJeremy Fitzhardinge 	kfree(area);
39275f4352fbSJeremy Fitzhardinge }
39285f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area);
3929a10aa579SChristoph Lameter 
39304f8b02b4STejun Heo #ifdef CONFIG_SMP
3931ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n)
3932ca23e405STejun Heo {
39334583e773SGeliang Tang 	return rb_entry_safe(n, struct vmap_area, rb_node);
3934ca23e405STejun Heo }
3935ca23e405STejun Heo 
3936ca23e405STejun Heo /**
393768ad4a33SUladzislau Rezki (Sony)  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
393868ad4a33SUladzislau Rezki (Sony)  * @addr: target address
3939ca23e405STejun Heo  *
394068ad4a33SUladzislau Rezki (Sony)  * Returns: the vmap_area if it is found. If there is no such area,
394168ad4a33SUladzislau Rezki (Sony)  *   the highest preceding (in reverse order) vmap_area is returned,
394268ad4a33SUladzislau Rezki (Sony)  *   i.e. va->va_start < addr && va->va_end < addr, or NULL
394368ad4a33SUladzislau Rezki (Sony)  *   if there are no areas before @addr.
3944ca23e405STejun Heo  */
394568ad4a33SUladzislau Rezki (Sony) static struct vmap_area *
394668ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr)
3947ca23e405STejun Heo {
394868ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *va, *tmp;
394968ad4a33SUladzislau Rezki (Sony) 	struct rb_node *n;
395068ad4a33SUladzislau Rezki (Sony) 
395168ad4a33SUladzislau Rezki (Sony) 	n = free_vmap_area_root.rb_node;
395268ad4a33SUladzislau Rezki (Sony) 	va = NULL;
3953ca23e405STejun Heo 
3954ca23e405STejun Heo 	while (n) {
395568ad4a33SUladzislau Rezki (Sony) 		tmp = rb_entry(n, struct vmap_area, rb_node);
395668ad4a33SUladzislau Rezki (Sony) 		if (tmp->va_start <= addr) {
395768ad4a33SUladzislau Rezki (Sony) 			va = tmp;
395868ad4a33SUladzislau Rezki (Sony) 			if (tmp->va_end >= addr)
3959ca23e405STejun Heo 				break;
3960ca23e405STejun Heo 
396168ad4a33SUladzislau Rezki (Sony) 			n = n->rb_right;
3962ca23e405STejun Heo 		} else {
396368ad4a33SUladzislau Rezki (Sony) 			n = n->rb_left;
3964ca23e405STejun Heo 		}
396568ad4a33SUladzislau Rezki (Sony) 	}
396668ad4a33SUladzislau Rezki (Sony) 
396768ad4a33SUladzislau Rezki (Sony) 	return va;
3968ca23e405STejun Heo }
3969ca23e405STejun Heo 
3970ca23e405STejun Heo /**
397168ad4a33SUladzislau Rezki (Sony)  * pvm_determine_end_from_reverse - find the highest aligned address
397268ad4a33SUladzislau Rezki (Sony)  * of free block below VMALLOC_END
397368ad4a33SUladzislau Rezki (Sony)  * @va:
397468ad4a33SUladzislau Rezki (Sony)  *   in - the VA we start the search from (reverse order);
397568ad4a33SUladzislau Rezki (Sony)  *   out - the VA with the highest aligned end address.
3976799fa85dSAlex Shi  * @align: alignment for required highest address
3977ca23e405STejun Heo  *
397868ad4a33SUladzislau Rezki (Sony)  * Returns: determined end address within vmap_area
3979ca23e405STejun Heo  */
398068ad4a33SUladzislau Rezki (Sony) static unsigned long
398168ad4a33SUladzislau Rezki (Sony) pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3982ca23e405STejun Heo {
398368ad4a33SUladzislau Rezki (Sony) 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3984ca23e405STejun Heo 	unsigned long addr;
3985ca23e405STejun Heo 
398668ad4a33SUladzislau Rezki (Sony) 	if (likely(*va)) {
398768ad4a33SUladzislau Rezki (Sony) 		list_for_each_entry_from_reverse((*va),
398868ad4a33SUladzislau Rezki (Sony) 				&free_vmap_area_list, list) {
398968ad4a33SUladzislau Rezki (Sony) 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
399068ad4a33SUladzislau Rezki (Sony) 			if ((*va)->va_start < addr)
399168ad4a33SUladzislau Rezki (Sony) 				return addr;
399268ad4a33SUladzislau Rezki (Sony) 		}
3993ca23e405STejun Heo 	}
3994ca23e405STejun Heo 
399568ad4a33SUladzislau Rezki (Sony) 	return 0;
3996ca23e405STejun Heo }
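/*
 * Editor's note, a worked instance of the masking above: with
 * align = 0x200000 (2 MiB), a free block ending at va_end =
 * 0xffffc90000312000 yields va_end & ~(align - 1) = 0xffffc90000200000,
 * i.e. the end is rounded down to the alignment boundary before being
 * checked against va_start and vmalloc_end.
 */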
3997ca23e405STejun Heo 
3998ca23e405STejun Heo /**
3999ca23e405STejun Heo  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4000ca23e405STejun Heo  * @offsets: array containing offset of each area
4001ca23e405STejun Heo  * @sizes: array containing size of each area
4002ca23e405STejun Heo  * @nr_vms: the number of areas to allocate
4003ca23e405STejun Heo  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4004ca23e405STejun Heo  *
4005ca23e405STejun Heo  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4006ca23e405STejun Heo  *	    vm_structs on success, %NULL on failure
4007ca23e405STejun Heo  *
4008ca23e405STejun Heo  * Percpu allocator wants to use congruent vm areas so that it can
4009ca23e405STejun Heo  * maintain the offsets among percpu areas.  This function allocates
4010ec3f64fcSDavid Rientjes  * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
4011ec3f64fcSDavid Rientjes  * be scattered pretty far apart, with the distance between two areas
4012ec3f64fcSDavid Rientjes  * easily going up to gigabytes.  To avoid interacting with regular
4013ec3f64fcSDavid Rientjes  * vmallocs, these areas are allocated from the top.
4014ca23e405STejun Heo  *
4015ca23e405STejun Heo  * Despite its complicated look, this allocator is rather simple. It
401668ad4a33SUladzislau Rezki (Sony)  * does everything top-down and scans free blocks from the end looking
401768ad4a33SUladzislau Rezki (Sony)  * for a matching base. While scanning, if any of the areas do not fit,
401868ad4a33SUladzislau Rezki (Sony)  * the base address is pulled down to fit the area. Scanning is repeated till
401968ad4a33SUladzislau Rezki (Sony)  * all the areas fit and then all necessary data structures are inserted
402068ad4a33SUladzislau Rezki (Sony)  * and the result is returned.
4021ca23e405STejun Heo  */
4022ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
4023ca23e405STejun Heo 				     const size_t *sizes, int nr_vms,
4024ec3f64fcSDavid Rientjes 				     size_t align)
4025ca23e405STejun Heo {
4026ca23e405STejun Heo 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
4027ca23e405STejun Heo 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
402868ad4a33SUladzislau Rezki (Sony) 	struct vmap_area **vas, *va;
4029ca23e405STejun Heo 	struct vm_struct **vms;
4030ca23e405STejun Heo 	int area, area2, last_area, term_area;
4031253a496dSDaniel Axtens 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
4032ca23e405STejun Heo 	bool purged = false;
4033ca23e405STejun Heo 
4034ca23e405STejun Heo 	/* verify parameters and allocate data structures */
4035891c49abSAlexander Kuleshov 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
4036ca23e405STejun Heo 	for (last_area = 0, area = 0; area < nr_vms; area++) {
4037ca23e405STejun Heo 		start = offsets[area];
4038ca23e405STejun Heo 		end = start + sizes[area];
4039ca23e405STejun Heo 
4040ca23e405STejun Heo 		/* is everything aligned properly? */
4041ca23e405STejun Heo 		BUG_ON(!IS_ALIGNED(offsets[area], align));
4042ca23e405STejun Heo 		BUG_ON(!IS_ALIGNED(sizes[area], align));
4043ca23e405STejun Heo 
4044ca23e405STejun Heo 		/* detect the area with the highest address */
4045ca23e405STejun Heo 		if (start > offsets[last_area])
4046ca23e405STejun Heo 			last_area = area;
4047ca23e405STejun Heo 
4048c568da28SWei Yang 		for (area2 = area + 1; area2 < nr_vms; area2++) {
4049ca23e405STejun Heo 			unsigned long start2 = offsets[area2];
4050ca23e405STejun Heo 			unsigned long end2 = start2 + sizes[area2];
4051ca23e405STejun Heo 
4052c568da28SWei Yang 			BUG_ON(start2 < end && start < end2);
4053ca23e405STejun Heo 		}
4054ca23e405STejun Heo 	}
4055ca23e405STejun Heo 	last_end = offsets[last_area] + sizes[last_area];
4056ca23e405STejun Heo 
4057ca23e405STejun Heo 	if (vmalloc_end - vmalloc_start < last_end) {
4058ca23e405STejun Heo 		WARN_ON(true);
4059ca23e405STejun Heo 		return NULL;
4060ca23e405STejun Heo 	}
4061ca23e405STejun Heo 
40624d67d860SThomas Meyer 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
40634d67d860SThomas Meyer 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
4064ca23e405STejun Heo 	if (!vas || !vms)
4065f1db7afdSKautuk Consul 		goto err_free2;
4066ca23e405STejun Heo 
4067ca23e405STejun Heo 	for (area = 0; area < nr_vms; area++) {
406868ad4a33SUladzislau Rezki (Sony) 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
4069ec3f64fcSDavid Rientjes 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
4070ca23e405STejun Heo 		if (!vas[area] || !vms[area])
4071ca23e405STejun Heo 			goto err_free;
4072ca23e405STejun Heo 	}
4073ca23e405STejun Heo retry:
4074e36176beSUladzislau Rezki (Sony) 	spin_lock(&free_vmap_area_lock);
4075ca23e405STejun Heo 
4076ca23e405STejun Heo 	/* start scanning - we scan from the top, begin with the last area */
4077ca23e405STejun Heo 	area = term_area = last_area;
4078ca23e405STejun Heo 	start = offsets[area];
4079ca23e405STejun Heo 	end = start + sizes[area];
4080ca23e405STejun Heo 
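	/*
	 * "base" is the candidate start address of the whole congruent
	 * block: on success each vms[area]->addr ends up at
	 * base + offsets[area]. The scan below only ever slides the
	 * base downwards until every area fits in a free VA block.
	 */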
408168ad4a33SUladzislau Rezki (Sony) 	va = pvm_find_va_enclose_addr(vmalloc_end);
408268ad4a33SUladzislau Rezki (Sony) 	base = pvm_determine_end_from_reverse(&va, align) - end;
4083ca23e405STejun Heo 
4084ca23e405STejun Heo 	while (true) {
4085ca23e405STejun Heo 		/*
4086ca23e405STejun Heo 		 * base might have underflowed, add last_end before
4087ca23e405STejun Heo 		 * comparing.
4088ca23e405STejun Heo 		 */
408968ad4a33SUladzislau Rezki (Sony) 		if (base + last_end < vmalloc_start + last_end)
409068ad4a33SUladzislau Rezki (Sony) 			goto overflow;
4091ca23e405STejun Heo 
4092ca23e405STejun Heo 		/*
409368ad4a33SUladzislau Rezki (Sony) 		 * A fitting base has not been found.
4094ca23e405STejun Heo 		 */
409568ad4a33SUladzislau Rezki (Sony) 		if (va == NULL)
409668ad4a33SUladzislau Rezki (Sony) 			goto overflow;
4097ca23e405STejun Heo 
4098ca23e405STejun Heo 		/*
4099d8cc323dSQiujun Huang 		 * If the required width exceeds the current VA block, move
41005336e52cSKuppuswamy Sathyanarayanan 		 * the base downwards and then recheck.
41015336e52cSKuppuswamy Sathyanarayanan 		 */
41025336e52cSKuppuswamy Sathyanarayanan 		if (base + end > va->va_end) {
41035336e52cSKuppuswamy Sathyanarayanan 			base = pvm_determine_end_from_reverse(&va, align) - end;
41045336e52cSKuppuswamy Sathyanarayanan 			term_area = area;
41055336e52cSKuppuswamy Sathyanarayanan 			continue;
41065336e52cSKuppuswamy Sathyanarayanan 		}
41075336e52cSKuppuswamy Sathyanarayanan 
41085336e52cSKuppuswamy Sathyanarayanan 		/*
410968ad4a33SUladzislau Rezki (Sony) 		 * If this VA does not fit, move the base downwards and recheck.
4110ca23e405STejun Heo 		 */
41115336e52cSKuppuswamy Sathyanarayanan 		if (base + start < va->va_start) {
411268ad4a33SUladzislau Rezki (Sony) 			va = node_to_va(rb_prev(&va->rb_node));
411368ad4a33SUladzislau Rezki (Sony) 			base = pvm_determine_end_from_reverse(&va, align) - end;
4114ca23e405STejun Heo 			term_area = area;
4115ca23e405STejun Heo 			continue;
4116ca23e405STejun Heo 		}
4117ca23e405STejun Heo 
4118ca23e405STejun Heo 		/*
4119ca23e405STejun Heo 		 * This area fits, move on to the previous one.  If
4120ca23e405STejun Heo 		 * the previous one is the terminal one, we're done.
4121ca23e405STejun Heo 		 */
4122ca23e405STejun Heo 		area = (area + nr_vms - 1) % nr_vms;
4123ca23e405STejun Heo 		if (area == term_area)
4124ca23e405STejun Heo 			break;
412568ad4a33SUladzislau Rezki (Sony) 
4126ca23e405STejun Heo 		start = offsets[area];
4127ca23e405STejun Heo 		end = start + sizes[area];
412868ad4a33SUladzislau Rezki (Sony) 		va = pvm_find_va_enclose_addr(base + end);
4129ca23e405STejun Heo 	}
413068ad4a33SUladzislau Rezki (Sony) 
4131ca23e405STejun Heo 	/* we've found a fitting base, insert all va's */
4132ca23e405STejun Heo 	for (area = 0; area < nr_vms; area++) {
413368ad4a33SUladzislau Rezki (Sony) 		int ret;
4134ca23e405STejun Heo 
413568ad4a33SUladzislau Rezki (Sony) 		start = base + offsets[area];
413668ad4a33SUladzislau Rezki (Sony) 		size = sizes[area];
413768ad4a33SUladzislau Rezki (Sony) 
413868ad4a33SUladzislau Rezki (Sony) 		va = pvm_find_va_enclose_addr(start);
413968ad4a33SUladzislau Rezki (Sony) 		if (WARN_ON_ONCE(va == NULL))
414068ad4a33SUladzislau Rezki (Sony) 			/* This would be a BUG(), but trigger recovery instead. */
414168ad4a33SUladzislau Rezki (Sony) 			goto recovery;
414268ad4a33SUladzislau Rezki (Sony) 
4143f9863be4SUladzislau Rezki (Sony) 		ret = adjust_va_to_fit_type(&free_vmap_area_root,
4144f9863be4SUladzislau Rezki (Sony) 					    &free_vmap_area_list,
4145f9863be4SUladzislau Rezki (Sony) 					    va, start, size);
41461b23ff80SBaoquan He 		if (WARN_ON_ONCE(unlikely(ret)))
414768ad4a33SUladzislau Rezki (Sony) 			/* This would be a BUG(), but trigger recovery instead. */
414868ad4a33SUladzislau Rezki (Sony) 			goto recovery;
414968ad4a33SUladzislau Rezki (Sony) 
415068ad4a33SUladzislau Rezki (Sony) 		/* Allocated area. */
415168ad4a33SUladzislau Rezki (Sony) 		va = vas[area];
415268ad4a33SUladzislau Rezki (Sony) 		va->va_start = start;
415368ad4a33SUladzislau Rezki (Sony) 		va->va_end = start + size;
4154ca23e405STejun Heo 	}
4155ca23e405STejun Heo 
4156e36176beSUladzislau Rezki (Sony) 	spin_unlock(&free_vmap_area_lock);
4157ca23e405STejun Heo 
4158253a496dSDaniel Axtens 	/* populate the kasan shadow space */
4159253a496dSDaniel Axtens 	for (area = 0; area < nr_vms; area++) {
4160253a496dSDaniel Axtens 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
4161253a496dSDaniel Axtens 			goto err_free_shadow;
4162253a496dSDaniel Axtens 	}
4163253a496dSDaniel Axtens 
4164ca23e405STejun Heo 	/* insert all vm's */
4165e36176beSUladzislau Rezki (Sony) 	spin_lock(&vmap_area_lock);
4166e36176beSUladzislau Rezki (Sony) 	for (area = 0; area < nr_vms; area++) {
4167e36176beSUladzislau Rezki (Sony) 		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
4168e36176beSUladzislau Rezki (Sony) 
4169e36176beSUladzislau Rezki (Sony) 		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
4170ca23e405STejun Heo 				 pcpu_get_vm_areas);
4171e36176beSUladzislau Rezki (Sony) 	}
4172e36176beSUladzislau Rezki (Sony) 	spin_unlock(&vmap_area_lock);
4173ca23e405STejun Heo 
417419f1c3acSAndrey Konovalov 	/*
417519f1c3acSAndrey Konovalov 	 * Mark allocated areas as accessible. Do it now as a best-effort
417619f1c3acSAndrey Konovalov 	 * approach, as they can be mapped outside of vmalloc code.
417723689e91SAndrey Konovalov 	 * With hardware tag-based KASAN, marking is skipped for
417823689e91SAndrey Konovalov 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
417919f1c3acSAndrey Konovalov 	 */
41801d96320fSAndrey Konovalov 	for (area = 0; area < nr_vms; area++)
41811d96320fSAndrey Konovalov 		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
4182f6e39794SAndrey Konovalov 				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
41831d96320fSAndrey Konovalov 
4184ca23e405STejun Heo 	kfree(vas);
4185ca23e405STejun Heo 	return vms;
4186ca23e405STejun Heo 
418768ad4a33SUladzislau Rezki (Sony) recovery:
4188e36176beSUladzislau Rezki (Sony) 	/*
4189e36176beSUladzislau Rezki (Sony) 	 * Remove previously allocated areas. There is no
4190e36176beSUladzislau Rezki (Sony) 	 * need to remove these areas from the busy tree,
4191e36176beSUladzislau Rezki (Sony) 	 * because they are inserted only on the final step
4192e36176beSUladzislau Rezki (Sony) 	 * and only when pcpu_get_vm_areas() succeeds.
4193e36176beSUladzislau Rezki (Sony) 	 */
419468ad4a33SUladzislau Rezki (Sony) 	while (area--) {
4195253a496dSDaniel Axtens 		orig_start = vas[area]->va_start;
4196253a496dSDaniel Axtens 		orig_end = vas[area]->va_end;
419796e2db45SUladzislau Rezki (Sony) 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
41983c5c3cfbSDaniel Axtens 				&free_vmap_area_list);
41999c801f61SUladzislau Rezki (Sony) 		if (va)
4200253a496dSDaniel Axtens 			kasan_release_vmalloc(orig_start, orig_end,
4201253a496dSDaniel Axtens 				va->va_start, va->va_end);
420268ad4a33SUladzislau Rezki (Sony) 		vas[area] = NULL;
420368ad4a33SUladzislau Rezki (Sony) 	}
420468ad4a33SUladzislau Rezki (Sony) 
420568ad4a33SUladzislau Rezki (Sony) overflow:
4206e36176beSUladzislau Rezki (Sony) 	spin_unlock(&free_vmap_area_lock);
420768ad4a33SUladzislau Rezki (Sony) 	if (!purged) {
420877e50af0SThomas Gleixner 		reclaim_and_purge_vmap_areas();
420968ad4a33SUladzislau Rezki (Sony) 		purged = true;
421068ad4a33SUladzislau Rezki (Sony) 
421168ad4a33SUladzislau Rezki (Sony) 		/* Before "retry", check whether we have recovered. */
421268ad4a33SUladzislau Rezki (Sony) 		for (area = 0; area < nr_vms; area++) {
421368ad4a33SUladzislau Rezki (Sony) 			if (vas[area])
421468ad4a33SUladzislau Rezki (Sony) 				continue;
421568ad4a33SUladzislau Rezki (Sony) 
421668ad4a33SUladzislau Rezki (Sony) 			vas[area] = kmem_cache_zalloc(
421768ad4a33SUladzislau Rezki (Sony) 				vmap_area_cachep, GFP_KERNEL);
421868ad4a33SUladzislau Rezki (Sony) 			if (!vas[area])
421968ad4a33SUladzislau Rezki (Sony) 				goto err_free;
422068ad4a33SUladzislau Rezki (Sony) 		}
422168ad4a33SUladzislau Rezki (Sony) 
422268ad4a33SUladzislau Rezki (Sony) 		goto retry;
422368ad4a33SUladzislau Rezki (Sony) 	}
422468ad4a33SUladzislau Rezki (Sony) 
4225ca23e405STejun Heo err_free:
4226ca23e405STejun Heo 	for (area = 0; area < nr_vms; area++) {
422768ad4a33SUladzislau Rezki (Sony) 		if (vas[area])
422868ad4a33SUladzislau Rezki (Sony) 			kmem_cache_free(vmap_area_cachep, vas[area]);
422968ad4a33SUladzislau Rezki (Sony) 
4230ca23e405STejun Heo 		kfree(vms[area]);
4231ca23e405STejun Heo 	}
4232f1db7afdSKautuk Consul err_free2:
4233ca23e405STejun Heo 	kfree(vas);
4234ca23e405STejun Heo 	kfree(vms);
4235ca23e405STejun Heo 	return NULL;
4236253a496dSDaniel Axtens 
4237253a496dSDaniel Axtens err_free_shadow:
4238253a496dSDaniel Axtens 	spin_lock(&free_vmap_area_lock);
4239253a496dSDaniel Axtens 	/*
4240253a496dSDaniel Axtens 	 * We release all the vmalloc shadows, even the ones for regions that
4241253a496dSDaniel Axtens 	 * hadn't been successfully added. This relies on kasan_release_vmalloc
4242253a496dSDaniel Axtens 	 * being able to tolerate this case.
4243253a496dSDaniel Axtens 	 */
4244253a496dSDaniel Axtens 	for (area = 0; area < nr_vms; area++) {
4245253a496dSDaniel Axtens 		orig_start = vas[area]->va_start;
4246253a496dSDaniel Axtens 		orig_end = vas[area]->va_end;
424796e2db45SUladzislau Rezki (Sony) 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4248253a496dSDaniel Axtens 				&free_vmap_area_list);
42499c801f61SUladzislau Rezki (Sony) 		if (va)
4250253a496dSDaniel Axtens 			kasan_release_vmalloc(orig_start, orig_end,
4251253a496dSDaniel Axtens 				va->va_start, va->va_end);
4252253a496dSDaniel Axtens 		vas[area] = NULL;
4253253a496dSDaniel Axtens 		kfree(vms[area]);
4254253a496dSDaniel Axtens 	}
4255253a496dSDaniel Axtens 	spin_unlock(&free_vmap_area_lock);
4256253a496dSDaniel Axtens 	kfree(vas);
4257253a496dSDaniel Axtens 	kfree(vms);
4258253a496dSDaniel Axtens 	return NULL;
4259ca23e405STejun Heo }
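
/*
 * Illustrative sketch, not compiled: a hypothetical caller requesting two
 * congruent areas, loosely modelled on how the percpu allocator uses this
 * interface. The helper name, offsets and sizes below are made up for
 * demonstration purposes.
 */
#if 0
static struct vm_struct **example_alloc_congruent(void)
{
	/* Offsets and sizes must both be multiples of the alignment. */
	static const unsigned long offsets[] = { 0, 4 * PAGE_SIZE };
	static const size_t sizes[] = { 2 * PAGE_SIZE, 2 * PAGE_SIZE };

	/*
	 * On success the two areas share one base address:
	 * vms[i]->addr == base + offsets[i], so the distance between
	 * them is identical for every allocation.
	 */
	return pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), PAGE_SIZE);
}
#endif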
4260ca23e405STejun Heo 
4261ca23e405STejun Heo /**
4262ca23e405STejun Heo  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4263ca23e405STejun Heo  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4264ca23e405STejun Heo  * @nr_vms: the number of allocated areas
4265ca23e405STejun Heo  *
4266ca23e405STejun Heo  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4267ca23e405STejun Heo  */
4268ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
4269ca23e405STejun Heo {
4270ca23e405STejun Heo 	int i;
4271ca23e405STejun Heo 
4272ca23e405STejun Heo 	for (i = 0; i < nr_vms; i++)
4273ca23e405STejun Heo 		free_vm_area(vms[i]);
4274ca23e405STejun Heo 	kfree(vms);
4275ca23e405STejun Heo }
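
/*
 * Illustrative counterpart to the allocation sketch above, not compiled:
 * every successful pcpu_get_vm_areas() must eventually be paired with
 * pcpu_free_vm_areas(), which also frees the returned array itself.
 */
#if 0
static void example_free_congruent(struct vm_struct **vms)
{
	if (vms)
		pcpu_free_vm_areas(vms, 2);	/* 2 == nr_vms used above */
}
#endif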
42764f8b02b4STejun Heo #endif	/* CONFIG_SMP */
4277a10aa579SChristoph Lameter 
42785bb1bb35SPaul E. McKenney #ifdef CONFIG_PRINTK
427998f18083SPaul E. McKenney bool vmalloc_dump_obj(void *object)
428098f18083SPaul E. McKenney {
428198f18083SPaul E. McKenney 	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
42820818e739SJoel Fernandes (Google) 	const void *caller;
42830818e739SJoel Fernandes (Google) 	struct vm_struct *vm;
42840818e739SJoel Fernandes (Google) 	struct vmap_area *va;
42850818e739SJoel Fernandes (Google) 	unsigned long addr;
42860818e739SJoel Fernandes (Google) 	unsigned int nr_pages;
428798f18083SPaul E. McKenney 
42880818e739SJoel Fernandes (Google) 	if (!spin_trylock(&vmap_area_lock))
428998f18083SPaul E. McKenney 		return false;
42900818e739SJoel Fernandes (Google) 	va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
42910818e739SJoel Fernandes (Google) 	if (!va) {
42920818e739SJoel Fernandes (Google) 		spin_unlock(&vmap_area_lock);
42930818e739SJoel Fernandes (Google) 		return false;
42940818e739SJoel Fernandes (Google) 	}
42950818e739SJoel Fernandes (Google) 
42960818e739SJoel Fernandes (Google) 	vm = va->vm;
42970818e739SJoel Fernandes (Google) 	if (!vm) {
42980818e739SJoel Fernandes (Google) 		spin_unlock(&vmap_area_lock);
42990818e739SJoel Fernandes (Google) 		return false;
43000818e739SJoel Fernandes (Google) 	}
43010818e739SJoel Fernandes (Google) 	addr = (unsigned long)vm->addr;
43020818e739SJoel Fernandes (Google) 	caller = vm->caller;
43030818e739SJoel Fernandes (Google) 	nr_pages = vm->nr_pages;
43040818e739SJoel Fernandes (Google) 	spin_unlock(&vmap_area_lock);
4305bd34dcd4SPaul E. McKenney 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
43060818e739SJoel Fernandes (Google) 		nr_pages, addr, caller);
430798f18083SPaul E. McKenney 	return true;
430898f18083SPaul E. McKenney }
43095bb1bb35SPaul E. McKenney #endif
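
/*
 * Illustrative sketch, not compiled: how a debug helper might consume
 * vmalloc_dump_obj(). The helper below is hypothetical; note that the
 * dump continues an already started line via pr_cont().
 */
#if 0
static void example_report_object(void *object)
{
	pr_info("object %px:", object);

	if (!is_vmalloc_addr(object) || !vmalloc_dump_obj(object))
		pr_cont(" not a tracked vmalloc region\n");
}
#endif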
431098f18083SPaul E. McKenney 
4311a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS
4312a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos)
4313e36176beSUladzislau Rezki (Sony) 	__acquires(&vmap_purge_lock)
4314d4033afdSJoonsoo Kim 	__acquires(&vmap_area_lock)
4315a10aa579SChristoph Lameter {
4316e36176beSUladzislau Rezki (Sony) 	mutex_lock(&vmap_purge_lock);
4317d4033afdSJoonsoo Kim 	spin_lock(&vmap_area_lock);
4318e36176beSUladzislau Rezki (Sony) 
43193f500069Szijun_hu 	return seq_list_start(&vmap_area_list, *pos);
4320a10aa579SChristoph Lameter }
4321a10aa579SChristoph Lameter 
4322a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4323a10aa579SChristoph Lameter {
43243f500069Szijun_hu 	return seq_list_next(p, &vmap_area_list, pos);
4325a10aa579SChristoph Lameter }
4326a10aa579SChristoph Lameter 
4327a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p)
4328d4033afdSJoonsoo Kim 	__releases(&vmap_area_lock)
43290a7dd4e9SWaiman Long 	__releases(&vmap_purge_lock)
4330a10aa579SChristoph Lameter {
4331d4033afdSJoonsoo Kim 	spin_unlock(&vmap_area_lock);
43320a7dd4e9SWaiman Long 	mutex_unlock(&vmap_purge_lock);
4333a10aa579SChristoph Lameter }
4334a10aa579SChristoph Lameter 
4335a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v)
4336a47a126aSEric Dumazet {
4337e5adfffcSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_NUMA)) {
4338a47a126aSEric Dumazet 		unsigned int nr, *counters = m->private;
433951e50b3aSEric Dumazet 		unsigned int step = 1U << vm_area_page_order(v);
4340a47a126aSEric Dumazet 
4341a47a126aSEric Dumazet 		if (!counters)
4342a47a126aSEric Dumazet 			return;
4343a47a126aSEric Dumazet 
4344af12346cSWanpeng Li 		if (v->flags & VM_UNINITIALIZED)
4345af12346cSWanpeng Li 			return;
43467e5b528bSDmitry Vyukov 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
43477e5b528bSDmitry Vyukov 		smp_rmb();
4348af12346cSWanpeng Li 
4349a47a126aSEric Dumazet 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
4350a47a126aSEric Dumazet 
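		/*
		 * Pages are attributed one (1 << page_order) block at a
		 * time; each such block resides on a single NUMA node.
		 */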
435151e50b3aSEric Dumazet 		for (nr = 0; nr < v->nr_pages; nr += step)
435251e50b3aSEric Dumazet 			counters[page_to_nid(v->pages[nr])] += step;
4353a47a126aSEric Dumazet 		for_each_node_state(nr, N_HIGH_MEMORY)
4354a47a126aSEric Dumazet 			if (counters[nr])
4355a47a126aSEric Dumazet 				seq_printf(m, " N%u=%u", nr, counters[nr]);
4356a47a126aSEric Dumazet 	}
4357a47a126aSEric Dumazet }
4358a47a126aSEric Dumazet 
4359dd3b8353SUladzislau Rezki (Sony) static void show_purge_info(struct seq_file *m)
4360dd3b8353SUladzislau Rezki (Sony) {
4361dd3b8353SUladzislau Rezki (Sony) 	struct vmap_area *va;
4362dd3b8353SUladzislau Rezki (Sony) 
436396e2db45SUladzislau Rezki (Sony) 	spin_lock(&purge_vmap_area_lock);
436496e2db45SUladzislau Rezki (Sony) 	list_for_each_entry(va, &purge_vmap_area_list, list) {
4365dd3b8353SUladzislau Rezki (Sony) 		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4366dd3b8353SUladzislau Rezki (Sony) 			(void *)va->va_start, (void *)va->va_end,
4367dd3b8353SUladzislau Rezki (Sony) 			va->va_end - va->va_start);
4368dd3b8353SUladzislau Rezki (Sony) 	}
436996e2db45SUladzislau Rezki (Sony) 	spin_unlock(&purge_vmap_area_lock);
4370dd3b8353SUladzislau Rezki (Sony) }
4371dd3b8353SUladzislau Rezki (Sony) 
4372a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p)
4373a10aa579SChristoph Lameter {
43743f500069Szijun_hu 	struct vmap_area *va;
4375d4033afdSJoonsoo Kim 	struct vm_struct *v;
4376d4033afdSJoonsoo Kim 
43773f500069Szijun_hu 	va = list_entry(p, struct vmap_area, list);
43783f500069Szijun_hu 
4379688fcbfcSPengfei Li 	if (!va->vm) {
4380bba9697bSBaoquan He 		if (va->flags & VMAP_RAM)
4381dd3b8353SUladzislau Rezki (Sony) 			seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
438278c72746SYisheng Xie 				(void *)va->va_start, (void *)va->va_end,
4383dd3b8353SUladzislau Rezki (Sony) 				va->va_end - va->va_start);
438478c72746SYisheng Xie 
43857cc7913eSEric Dumazet 		goto final;
438678c72746SYisheng Xie 	}
4387d4033afdSJoonsoo Kim 
4388d4033afdSJoonsoo Kim 	v = va->vm;
4389a10aa579SChristoph Lameter 
439045ec1690SKees Cook 	seq_printf(m, "0x%pK-0x%pK %7ld",
4391a10aa579SChristoph Lameter 		v->addr, v->addr + v->size, v->size);
4392a10aa579SChristoph Lameter 
439362c70bceSJoe Perches 	if (v->caller)
439462c70bceSJoe Perches 		seq_printf(m, " %pS", v->caller);
439523016969SChristoph Lameter 
4396a10aa579SChristoph Lameter 	if (v->nr_pages)
4397a10aa579SChristoph Lameter 		seq_printf(m, " pages=%d", v->nr_pages);
4398a10aa579SChristoph Lameter 
4399a10aa579SChristoph Lameter 	if (v->phys_addr)
4400199eaa05SMiles Chen 		seq_printf(m, " phys=%pa", &v->phys_addr);
4401a10aa579SChristoph Lameter 
4402a10aa579SChristoph Lameter 	if (v->flags & VM_IOREMAP)
4403f4527c90SFabian Frederick 		seq_puts(m, " ioremap");
4404a10aa579SChristoph Lameter 
4405a10aa579SChristoph Lameter 	if (v->flags & VM_ALLOC)
4406f4527c90SFabian Frederick 		seq_puts(m, " vmalloc");
4407a10aa579SChristoph Lameter 
4408a10aa579SChristoph Lameter 	if (v->flags & VM_MAP)
4409f4527c90SFabian Frederick 		seq_puts(m, " vmap");
4410a10aa579SChristoph Lameter 
4411a10aa579SChristoph Lameter 	if (v->flags & VM_USERMAP)
4412f4527c90SFabian Frederick 		seq_puts(m, " user");
4413a10aa579SChristoph Lameter 
4414fe9041c2SChristoph Hellwig 	if (v->flags & VM_DMA_COHERENT)
4415fe9041c2SChristoph Hellwig 		seq_puts(m, " dma-coherent");
4416fe9041c2SChristoph Hellwig 
4417244d63eeSDavid Rientjes 	if (is_vmalloc_addr(v->pages))
4418f4527c90SFabian Frederick 		seq_puts(m, " vpages");
4419a10aa579SChristoph Lameter 
4420a47a126aSEric Dumazet 	show_numa_info(m, v);
4421a10aa579SChristoph Lameter 	seq_putc(m, '\n');
4422dd3b8353SUladzislau Rezki (Sony) 
4423dd3b8353SUladzislau Rezki (Sony) 	/*
442496e2db45SUladzislau Rezki (Sony) 	 * As a final step, dump "unpurged" areas.
4425dd3b8353SUladzislau Rezki (Sony) 	 */
44267cc7913eSEric Dumazet final:
4427dd3b8353SUladzislau Rezki (Sony) 	if (list_is_last(&va->list, &vmap_area_list))
4428dd3b8353SUladzislau Rezki (Sony) 		show_purge_info(m);
4429dd3b8353SUladzislau Rezki (Sony) 
4430a10aa579SChristoph Lameter 	return 0;
4431a10aa579SChristoph Lameter }
4432a10aa579SChristoph Lameter 
44335f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = {
4434a10aa579SChristoph Lameter 	.start = s_start,
4435a10aa579SChristoph Lameter 	.next = s_next,
4436a10aa579SChristoph Lameter 	.stop = s_stop,
4437a10aa579SChristoph Lameter 	.show = s_show,
4438a10aa579SChristoph Lameter };
44395f6a6a9cSAlexey Dobriyan 
44405f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void)
44415f6a6a9cSAlexey Dobriyan {
4442fddda2b7SChristoph Hellwig 	if (IS_ENABLED(CONFIG_NUMA))
44430825a6f9SJoe Perches 		proc_create_seq_private("vmallocinfo", 0400, NULL,
444444414d82SChristoph Hellwig 				&vmalloc_op,
444544414d82SChristoph Hellwig 				nr_node_ids * sizeof(unsigned int), NULL);
4446fddda2b7SChristoph Hellwig 	else
44470825a6f9SJoe Perches 		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
44485f6a6a9cSAlexey Dobriyan 	return 0;
44495f6a6a9cSAlexey Dobriyan }
44505f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init);
4451db3808c1SJoonsoo Kim 
4452a10aa579SChristoph Lameter #endif
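
/*
 * Illustrative user-space example, not part of the kernel build: reading
 * the /proc/vmallocinfo interface created above. Each line follows the
 * s_show() format, roughly:
 *
 *   0xffffc90000001000-0xffffc90000005000   16384 some_caller+0x42/0x90 pages=3 vmalloc N0=3
 *
 * The file is created with mode 0400, so this normally requires root.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/vmallocinfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif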
4453208162f4SChristoph Hellwig 
4454208162f4SChristoph Hellwig void __init vmalloc_init(void)
4455208162f4SChristoph Hellwig {
4456208162f4SChristoph Hellwig 	struct vmap_area *va;
4457208162f4SChristoph Hellwig 	struct vm_struct *tmp;
4458208162f4SChristoph Hellwig 	int i;
4459208162f4SChristoph Hellwig 
4460208162f4SChristoph Hellwig 	/*
4461208162f4SChristoph Hellwig 	 * Create the cache for vmap_area objects.
4462208162f4SChristoph Hellwig 	 */
4463208162f4SChristoph Hellwig 	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
4464208162f4SChristoph Hellwig 
4465208162f4SChristoph Hellwig 	for_each_possible_cpu(i) {
4466208162f4SChristoph Hellwig 		struct vmap_block_queue *vbq;
4467208162f4SChristoph Hellwig 		struct vfree_deferred *p;
4468208162f4SChristoph Hellwig 
4469208162f4SChristoph Hellwig 		vbq = &per_cpu(vmap_block_queue, i);
4470208162f4SChristoph Hellwig 		spin_lock_init(&vbq->lock);
4471208162f4SChristoph Hellwig 		INIT_LIST_HEAD(&vbq->free);
4472208162f4SChristoph Hellwig 		p = &per_cpu(vfree_deferred, i);
4473208162f4SChristoph Hellwig 		init_llist_head(&p->list);
4474208162f4SChristoph Hellwig 		INIT_WORK(&p->wq, delayed_vfree_work);
4475062eacf5SUladzislau Rezki (Sony) 		xa_init(&vbq->vmap_blocks);
4476208162f4SChristoph Hellwig 	}
4477208162f4SChristoph Hellwig 
4478208162f4SChristoph Hellwig 	/* Import existing vmlist entries. */
4479208162f4SChristoph Hellwig 	for (tmp = vmlist; tmp; tmp = tmp->next) {
4480208162f4SChristoph Hellwig 		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
4481208162f4SChristoph Hellwig 		if (WARN_ON_ONCE(!va))
4482208162f4SChristoph Hellwig 			continue;
4483208162f4SChristoph Hellwig 
4484208162f4SChristoph Hellwig 		va->va_start = (unsigned long)tmp->addr;
4485208162f4SChristoph Hellwig 		va->va_end = va->va_start + tmp->size;
4486208162f4SChristoph Hellwig 		va->vm = tmp;
4487208162f4SChristoph Hellwig 		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
4488208162f4SChristoph Hellwig 	}
4489208162f4SChristoph Hellwig 
4490208162f4SChristoph Hellwig 	/*
4491208162f4SChristoph Hellwig 	 * Now we can initialize the free vmap space.
4492208162f4SChristoph Hellwig 	 */
4493208162f4SChristoph Hellwig 	vmap_init_free_space();
4494208162f4SChristoph Hellwig 	vmap_initialized = true;
4495208162f4SChristoph Hellwig }
4496