// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
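
/*
 * Both of the above are early boot parameters. For example, booting with
 * "nohugevmalloc" on the kernel command line keeps vmalloc() at PAGE_SIZE
 * mappings even on architectures that support huge vmalloc, and
 * "nohugeiomap" does the same for ioremap().
 */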

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(ptep_get(pte)));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}
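
/*
 * A typical ioremap() implementation builds on this: it reserves a
 * VM_IOREMAP area and then maps the physical range into it, roughly as in
 * the simplified sketch below (NULL checks, sub-page offsets and error
 * handling of the real callers are omitted):
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *	unsigned long vaddr = (unsigned long)area->addr;
 *
 *	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *	return (void __iomem *)vaddr;
 */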

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range() after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
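/*
 * For example, a caller that defers and batches TLB flushes (as the lazy
 * vmap purge path does) proceeds roughly as follows for each range:
 *
 *	flush_cache_vunmap(start, end);
 *	vunmap_range_noflush(start, end);
 *	...collect more ranges to be purged...
 *	flush_tlb_kernel_range(start, end);	// one flush covering the batch
 *
 * Until that TLB flush has happened, stale translations may still be
 * cached and the range must not be reused or remapped.
 */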
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte))))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
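
/*
 * For example, mapping an array of order-0 pages into a freshly reserved
 * VM area (essentially what vmap() does internally) looks roughly like the
 * sketch below (NULL checks and the caller's cleanup are omitted):
 *
 *	struct vm_struct *area = get_vm_area(count << PAGE_SHIFT, VM_MAP);
 *	unsigned long addr = (unsigned long)area->addr;
 *
 *	if (vmap_pages_range(addr, addr + (count << PAGE_SHIFT),
 *			     PAGE_KERNEL, pages, PAGE_SHIFT))
 *		free_vm_area(area);
 */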

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
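
/*
 * For example, the physical address behind an arbitrary vmalloc pointer can
 * be derived by combining the pfn with the offset inside the page:
 *
 *	phys_addr_t pa = PFN_PHYS(vmalloc_to_pfn(p)) + offset_in_page(p);
 *
 * Since a vmalloc area is only virtually contiguous, this has to be done
 * per page whenever a physically contiguous view is required.
 */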

/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

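/*
 * For example, if the root of this tree reports a subtree_max_size of 8
 * pages, a request for 16 pages can be rejected without descending at all,
 * while a request for 4 pages descends into the lowest-address subtree
 * whose subtree_max_size still satisfies it, ending at the free block with
 * the lowest suitable address.
 */
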
/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = vmap_area_root.rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the address of the parent node and of its
 * left or right link for further processing.
 *
 * Otherwise NULL is returned, which means the new range conflicts
 * (overlaps) with an existing one. In that case all further steps
 * regarding the insertion have to be declined; this is considered
 * a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * named "link" here, where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checking.
		 * Warn if the new range partially or fully overlaps an
		 * existing one.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

92468ad4a33SUladzislau Rezki (Sony) static __always_inline void
__link_va(struct vmap_area * va,struct rb_root * root,struct rb_node * parent,struct rb_node ** link,struct list_head * head,bool augment)9258eb510dbSUladzislau Rezki (Sony) __link_va(struct vmap_area *va, struct rb_root *root,
9268eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link,
9278eb510dbSUladzislau Rezki (Sony) struct list_head *head, bool augment)
92868ad4a33SUladzislau Rezki (Sony) {
92968ad4a33SUladzislau Rezki (Sony) /*
93068ad4a33SUladzislau Rezki (Sony) * VA is still not in the list, but we can
93168ad4a33SUladzislau Rezki (Sony) * identify its future previous list_head node.
93268ad4a33SUladzislau Rezki (Sony) */
93368ad4a33SUladzislau Rezki (Sony) if (likely(parent)) {
93468ad4a33SUladzislau Rezki (Sony) head = &rb_entry(parent, struct vmap_area, rb_node)->list;
93568ad4a33SUladzislau Rezki (Sony) if (&parent->rb_right != link)
93668ad4a33SUladzislau Rezki (Sony) head = head->prev;
93768ad4a33SUladzislau Rezki (Sony) }
938db64fe02SNick Piggin
93968ad4a33SUladzislau Rezki (Sony) /* Insert to the rb-tree */
94068ad4a33SUladzislau Rezki (Sony) rb_link_node(&va->rb_node, parent, link);
9418eb510dbSUladzislau Rezki (Sony) if (augment) {
94268ad4a33SUladzislau Rezki (Sony) /*
94368ad4a33SUladzislau Rezki (Sony) * Some explanation here. Just perform simple insertion
94468ad4a33SUladzislau Rezki (Sony) * to the tree. We do not set va->subtree_max_size to
94568ad4a33SUladzislau Rezki (Sony) * its current size before calling rb_insert_augmented().
946153090f2SBaoquan He * It is because we populate the tree from the bottom
94768ad4a33SUladzislau Rezki (Sony) * to parent levels when the node _is_ in the tree.
94868ad4a33SUladzislau Rezki (Sony) *
94968ad4a33SUladzislau Rezki (Sony) * Therefore we set subtree_max_size to zero after insertion,
95068ad4a33SUladzislau Rezki (Sony) * to let __augment_tree_propagate_from() puts everything to
95168ad4a33SUladzislau Rezki (Sony) * the correct order later on.
95268ad4a33SUladzislau Rezki (Sony) */
95368ad4a33SUladzislau Rezki (Sony) rb_insert_augmented(&va->rb_node,
95468ad4a33SUladzislau Rezki (Sony) root, &free_vmap_area_rb_augment_cb);
95568ad4a33SUladzislau Rezki (Sony) va->subtree_max_size = 0;
95668ad4a33SUladzislau Rezki (Sony) } else {
95768ad4a33SUladzislau Rezki (Sony) rb_insert_color(&va->rb_node, root);
95868ad4a33SUladzislau Rezki (Sony) }
95968ad4a33SUladzislau Rezki (Sony)
96068ad4a33SUladzislau Rezki (Sony) /* Address-sort this list */
96168ad4a33SUladzislau Rezki (Sony) list_add(&va->list, head);
96268ad4a33SUladzislau Rezki (Sony) }
96368ad4a33SUladzislau Rezki (Sony)
96468ad4a33SUladzislau Rezki (Sony) static __always_inline void
link_va(struct vmap_area * va,struct rb_root * root,struct rb_node * parent,struct rb_node ** link,struct list_head * head)9658eb510dbSUladzislau Rezki (Sony) link_va(struct vmap_area *va, struct rb_root *root,
9668eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link,
9678eb510dbSUladzislau Rezki (Sony) struct list_head *head)
9688eb510dbSUladzislau Rezki (Sony) {
9698eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, false);
9708eb510dbSUladzislau Rezki (Sony) }
9718eb510dbSUladzislau Rezki (Sony)
9728eb510dbSUladzislau Rezki (Sony) static __always_inline void
link_va_augment(struct vmap_area * va,struct rb_root * root,struct rb_node * parent,struct rb_node ** link,struct list_head * head)9738eb510dbSUladzislau Rezki (Sony) link_va_augment(struct vmap_area *va, struct rb_root *root,
9748eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link,
9758eb510dbSUladzislau Rezki (Sony) struct list_head *head)
9768eb510dbSUladzislau Rezki (Sony) {
9778eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, true);
9788eb510dbSUladzislau Rezki (Sony) }
9798eb510dbSUladzislau Rezki (Sony)
9808eb510dbSUladzislau Rezki (Sony) static __always_inline void
__unlink_va(struct vmap_area * va,struct rb_root * root,bool augment)9818eb510dbSUladzislau Rezki (Sony) __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
98268ad4a33SUladzislau Rezki (Sony) {
983460e42d1SUladzislau Rezki (Sony) if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
984460e42d1SUladzislau Rezki (Sony) return;
985460e42d1SUladzislau Rezki (Sony)
9868eb510dbSUladzislau Rezki (Sony) if (augment)
98768ad4a33SUladzislau Rezki (Sony) rb_erase_augmented(&va->rb_node,
98868ad4a33SUladzislau Rezki (Sony) root, &free_vmap_area_rb_augment_cb);
98968ad4a33SUladzislau Rezki (Sony) else
99068ad4a33SUladzislau Rezki (Sony) rb_erase(&va->rb_node, root);
99168ad4a33SUladzislau Rezki (Sony)
9925d7a7c54SUladzislau Rezki (Sony) list_del_init(&va->list);
99368ad4a33SUladzislau Rezki (Sony) RB_CLEAR_NODE(&va->rb_node);
99468ad4a33SUladzislau Rezki (Sony) }
99568ad4a33SUladzislau Rezki (Sony)
9968eb510dbSUladzislau Rezki (Sony) static __always_inline void
unlink_va(struct vmap_area * va,struct rb_root * root)9978eb510dbSUladzislau Rezki (Sony) unlink_va(struct vmap_area *va, struct rb_root *root)
9988eb510dbSUladzislau Rezki (Sony) {
9998eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, false);
10008eb510dbSUladzislau Rezki (Sony) }
10018eb510dbSUladzislau Rezki (Sony)
10028eb510dbSUladzislau Rezki (Sony) static __always_inline void
10038eb510dbSUladzislau Rezki (Sony) unlink_va_augment(struct vmap_area *va, struct rb_root *root)
10048eb510dbSUladzislau Rezki (Sony) {
10058eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, true);
10068eb510dbSUladzislau Rezki (Sony) }
10078eb510dbSUladzislau Rezki (Sony)
1008bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK
1009c3385e84SJiapeng Chong /*
1010c3385e84SJiapeng Chong * Gets called when removing the node and rotating.
1011c3385e84SJiapeng Chong */
1012c3385e84SJiapeng Chong static __always_inline unsigned long
1013c3385e84SJiapeng Chong compute_subtree_max_size(struct vmap_area *va)
1014c3385e84SJiapeng Chong {
1015c3385e84SJiapeng Chong return max3(va_size(va),
1016c3385e84SJiapeng Chong get_subtree_max_size(va->rb_node.rb_left),
1017c3385e84SJiapeng Chong get_subtree_max_size(va->rb_node.rb_right));
1018c3385e84SJiapeng Chong }
1019c3385e84SJiapeng Chong
1020bb850f4dSUladzislau Rezki (Sony) static void
1021da27c9edSUladzislau Rezki (Sony) augment_tree_propagate_check(void)
1022bb850f4dSUladzislau Rezki (Sony) {
1023bb850f4dSUladzislau Rezki (Sony) struct vmap_area *va;
1024da27c9edSUladzislau Rezki (Sony) unsigned long computed_size;
1025bb850f4dSUladzislau Rezki (Sony)
1026da27c9edSUladzislau Rezki (Sony) list_for_each_entry(va, &free_vmap_area_list, list) {
1027da27c9edSUladzislau Rezki (Sony) computed_size = compute_subtree_max_size(va);
1028da27c9edSUladzislau Rezki (Sony) if (computed_size != va->subtree_max_size)
1029bb850f4dSUladzislau Rezki (Sony) pr_emerg("tree is corrupted: %lu, %lu\n",
1030bb850f4dSUladzislau Rezki (Sony) va_size(va), va->subtree_max_size);
1031bb850f4dSUladzislau Rezki (Sony) }
1032bb850f4dSUladzislau Rezki (Sony) }
1033bb850f4dSUladzislau Rezki (Sony) #endif
1034bb850f4dSUladzislau Rezki (Sony)
103568ad4a33SUladzislau Rezki (Sony) /*
103668ad4a33SUladzislau Rezki (Sony) * This function populates subtree_max_size from the bottom towards
103768ad4a33SUladzislau Rezki (Sony) * upper levels, starting from the VA node. The propagation must be
103868ad4a33SUladzislau Rezki (Sony) * done when the VA size is modified by changing its va_start/va_end,
103968ad4a33SUladzislau Rezki (Sony) * or when a new VA is inserted into the tree.
104068ad4a33SUladzislau Rezki (Sony) *
104168ad4a33SUladzislau Rezki (Sony) * It means that __augment_tree_propagate_from() must be called:
104268ad4a33SUladzislau Rezki (Sony) * - After VA has been inserted into the tree (free path);
104368ad4a33SUladzislau Rezki (Sony) * - After VA has been shrunk (allocation path);
104468ad4a33SUladzislau Rezki (Sony) * - After VA has been increased (merging path).
104568ad4a33SUladzislau Rezki (Sony) *
104668ad4a33SUladzislau Rezki (Sony) * Please note that this does not mean that upper parent nodes
104768ad4a33SUladzislau Rezki (Sony) * and their subtree_max_size are recalculated all the way up
104868ad4a33SUladzislau Rezki (Sony) * to the root node.
104968ad4a33SUladzislau Rezki (Sony) *
105068ad4a33SUladzislau Rezki (Sony) * 4--8
105168ad4a33SUladzislau Rezki (Sony) * /\
105268ad4a33SUladzislau Rezki (Sony) * / \
105368ad4a33SUladzislau Rezki (Sony) * / \
105468ad4a33SUladzislau Rezki (Sony) * 2--2 8--8
105568ad4a33SUladzislau Rezki (Sony) *
105668ad4a33SUladzislau Rezki (Sony) * For example, if we modify the node 4, shrinking it to 2, then
105768ad4a33SUladzislau Rezki (Sony) * no modification is required at all. If we shrink the node 2 to 1,
105868ad4a33SUladzislau Rezki (Sony) * only its subtree_max_size is updated, and set to 1. If we shrink
105968ad4a33SUladzislau Rezki (Sony) * the node 8 to 6, then its subtree_max_size is set to 6 and the
106068ad4a33SUladzislau Rezki (Sony) * parent node becomes 4--6.
106168ad4a33SUladzislau Rezki (Sony) */
106268ad4a33SUladzislau Rezki (Sony) static __always_inline void
106368ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(struct vmap_area *va)
106468ad4a33SUladzislau Rezki (Sony) {
106568ad4a33SUladzislau Rezki (Sony) /*
106615ae144fSUladzislau Rezki (Sony) * Populate the tree from the bottom towards the root until
106715ae144fSUladzislau Rezki (Sony) * the calculated maximum available size of the checked node
106815ae144fSUladzislau Rezki (Sony) * is equal to its current one.
106968ad4a33SUladzislau Rezki (Sony) */
107015ae144fSUladzislau Rezki (Sony) free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1071bb850f4dSUladzislau Rezki (Sony)
1072bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK
1073da27c9edSUladzislau Rezki (Sony) augment_tree_propagate_check();
1074bb850f4dSUladzislau Rezki (Sony) #endif
107568ad4a33SUladzislau Rezki (Sony) }
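/*
 * Editor's note: a minimal, self-contained sketch of the same bottom-up
 * propagation idea, outside of the rb-tree machinery above. All names
 * (struct node, propagate_up, ...) are hypothetical and only illustrate
 * the "stop once the recomputed maximum equals the stored one" shortcut.
 */
struct node {
	struct node *left, *right, *parent;
	unsigned long size;		/* size of this free range */
	unsigned long subtree_max;	/* max free-range size in subtree */
};

static unsigned long node_subtree_max(struct node *n)
{
	return n ? n->subtree_max : 0;
}

static void propagate_up(struct node *n)
{
	while (n) {
		unsigned long new_max = n->size;

		if (node_subtree_max(n->left) > new_max)
			new_max = node_subtree_max(n->left);
		if (node_subtree_max(n->right) > new_max)
			new_max = node_subtree_max(n->right);

		/* Upper levels are already consistent, stop early. */
		if (n->subtree_max == new_max)
			break;

		n->subtree_max = new_max;
		n = n->parent;
	}
}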
107668ad4a33SUladzislau Rezki (Sony)
107768ad4a33SUladzislau Rezki (Sony) static void
107868ad4a33SUladzislau Rezki (Sony) insert_vmap_area(struct vmap_area *va,
107968ad4a33SUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head)
108068ad4a33SUladzislau Rezki (Sony) {
108168ad4a33SUladzislau Rezki (Sony) struct rb_node **link;
108268ad4a33SUladzislau Rezki (Sony) struct rb_node *parent;
108368ad4a33SUladzislau Rezki (Sony)
108468ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent);
10859c801f61SUladzislau Rezki (Sony) if (link)
108668ad4a33SUladzislau Rezki (Sony) link_va(va, root, parent, link, head);
108768ad4a33SUladzislau Rezki (Sony) }
108868ad4a33SUladzislau Rezki (Sony)
108968ad4a33SUladzislau Rezki (Sony) static void
109068ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(struct vmap_area *va,
109168ad4a33SUladzislau Rezki (Sony) struct rb_node *from, struct rb_root *root,
109268ad4a33SUladzislau Rezki (Sony) struct list_head *head)
109368ad4a33SUladzislau Rezki (Sony) {
109468ad4a33SUladzislau Rezki (Sony) struct rb_node **link;
109568ad4a33SUladzislau Rezki (Sony) struct rb_node *parent;
109668ad4a33SUladzislau Rezki (Sony)
109768ad4a33SUladzislau Rezki (Sony) if (from)
109868ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, NULL, from, &parent);
109968ad4a33SUladzislau Rezki (Sony) else
110068ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent);
110168ad4a33SUladzislau Rezki (Sony)
11029c801f61SUladzislau Rezki (Sony) if (link) {
11038eb510dbSUladzislau Rezki (Sony) link_va_augment(va, root, parent, link, head);
110468ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va);
110568ad4a33SUladzislau Rezki (Sony) }
11069c801f61SUladzislau Rezki (Sony) }
110768ad4a33SUladzislau Rezki (Sony)
110868ad4a33SUladzislau Rezki (Sony) /*
110968ad4a33SUladzislau Rezki (Sony) * Merge a de-allocated chunk of VA memory with the previous
111068ad4a33SUladzislau Rezki (Sony) * and next free blocks. If no coalescing is done, a new
111168ad4a33SUladzislau Rezki (Sony) * free area is inserted. If the VA has been merged, it is
111268ad4a33SUladzislau Rezki (Sony) * freed.
11139c801f61SUladzislau Rezki (Sony) *
11149c801f61SUladzislau Rezki (Sony) * Please note, it can return NULL in case of overlapping
11159c801f61SUladzislau Rezki (Sony) * ranges, followed by a WARN() report. Despite being
11169c801f61SUladzislau Rezki (Sony) * buggy behaviour, the system can stay alive and keep
11179c801f61SUladzislau Rezki (Sony) * going.
111868ad4a33SUladzislau Rezki (Sony) */
11193c5c3cfbSDaniel Axtens static __always_inline struct vmap_area *
11208eb510dbSUladzislau Rezki (Sony) __merge_or_add_vmap_area(struct vmap_area *va,
11218eb510dbSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head, bool augment)
112268ad4a33SUladzislau Rezki (Sony) {
112368ad4a33SUladzislau Rezki (Sony) struct vmap_area *sibling;
112468ad4a33SUladzislau Rezki (Sony) struct list_head *next;
112568ad4a33SUladzislau Rezki (Sony) struct rb_node **link;
112668ad4a33SUladzislau Rezki (Sony) struct rb_node *parent;
112768ad4a33SUladzislau Rezki (Sony) bool merged = false;
112868ad4a33SUladzislau Rezki (Sony)
112968ad4a33SUladzislau Rezki (Sony) /*
113068ad4a33SUladzislau Rezki (Sony) * Find a place in the tree where VA potentially will be
113168ad4a33SUladzislau Rezki (Sony) * inserted, unless it is merged with its sibling/siblings.
113268ad4a33SUladzislau Rezki (Sony) */
113368ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent);
11349c801f61SUladzislau Rezki (Sony) if (!link)
11359c801f61SUladzislau Rezki (Sony) return NULL;
113668ad4a33SUladzislau Rezki (Sony)
113768ad4a33SUladzislau Rezki (Sony) /*
113868ad4a33SUladzislau Rezki (Sony) * Get next node of VA to check if merging can be done.
113968ad4a33SUladzislau Rezki (Sony) */
114068ad4a33SUladzislau Rezki (Sony) next = get_va_next_sibling(parent, link);
114168ad4a33SUladzislau Rezki (Sony) if (unlikely(next == NULL))
114268ad4a33SUladzislau Rezki (Sony) goto insert;
114368ad4a33SUladzislau Rezki (Sony)
114468ad4a33SUladzislau Rezki (Sony) /*
114568ad4a33SUladzislau Rezki (Sony) * start end
114668ad4a33SUladzislau Rezki (Sony) * | |
114768ad4a33SUladzislau Rezki (Sony) * |<------VA------>|<-----Next----->|
114868ad4a33SUladzislau Rezki (Sony) * | |
114968ad4a33SUladzislau Rezki (Sony) * start end
115068ad4a33SUladzislau Rezki (Sony) */
115168ad4a33SUladzislau Rezki (Sony) if (next != head) {
115268ad4a33SUladzislau Rezki (Sony) sibling = list_entry(next, struct vmap_area, list);
115368ad4a33SUladzislau Rezki (Sony) if (sibling->va_start == va->va_end) {
115468ad4a33SUladzislau Rezki (Sony) sibling->va_start = va->va_start;
115568ad4a33SUladzislau Rezki (Sony)
115668ad4a33SUladzislau Rezki (Sony) /* Free vmap_area object. */
115768ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
115868ad4a33SUladzislau Rezki (Sony)
115968ad4a33SUladzislau Rezki (Sony) /* Point to the new merged area. */
116068ad4a33SUladzislau Rezki (Sony) va = sibling;
116168ad4a33SUladzislau Rezki (Sony) merged = true;
116268ad4a33SUladzislau Rezki (Sony) }
116368ad4a33SUladzislau Rezki (Sony) }
116468ad4a33SUladzislau Rezki (Sony)
116568ad4a33SUladzislau Rezki (Sony) /*
116668ad4a33SUladzislau Rezki (Sony) * start end
116768ad4a33SUladzislau Rezki (Sony) * | |
116868ad4a33SUladzislau Rezki (Sony) * |<-----Prev----->|<------VA------>|
116968ad4a33SUladzislau Rezki (Sony) * | |
117068ad4a33SUladzislau Rezki (Sony) * start end
117168ad4a33SUladzislau Rezki (Sony) */
117268ad4a33SUladzislau Rezki (Sony) if (next->prev != head) {
117368ad4a33SUladzislau Rezki (Sony) sibling = list_entry(next->prev, struct vmap_area, list);
117468ad4a33SUladzislau Rezki (Sony) if (sibling->va_end == va->va_start) {
11755dd78640SUladzislau Rezki (Sony) /*
11765dd78640SUladzislau Rezki (Sony) * If both neighbors are coalesced, it is important
11775dd78640SUladzislau Rezki (Sony) * to unlink the "next" node first, followed by merging
11785dd78640SUladzislau Rezki (Sony) * with "previous" one. Otherwise the tree might not be
11795dd78640SUladzislau Rezki (Sony) * fully populated if a sibling's augmented value is
11805dd78640SUladzislau Rezki (Sony) * "normalized" because of rotation operations.
11815dd78640SUladzislau Rezki (Sony) */
118254f63d9dSUladzislau Rezki (Sony) if (merged)
11838eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, augment);
118468ad4a33SUladzislau Rezki (Sony)
11855dd78640SUladzislau Rezki (Sony) sibling->va_end = va->va_end;
11865dd78640SUladzislau Rezki (Sony)
118768ad4a33SUladzislau Rezki (Sony) /* Free vmap_area object. */
118868ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
11893c5c3cfbSDaniel Axtens
11903c5c3cfbSDaniel Axtens /* Point to the new merged area. */
11913c5c3cfbSDaniel Axtens va = sibling;
11923c5c3cfbSDaniel Axtens merged = true;
119368ad4a33SUladzislau Rezki (Sony) }
119468ad4a33SUladzislau Rezki (Sony) }
119568ad4a33SUladzislau Rezki (Sony)
119668ad4a33SUladzislau Rezki (Sony) insert:
11975dd78640SUladzislau Rezki (Sony) if (!merged)
11988eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, augment);
11993c5c3cfbSDaniel Axtens
120096e2db45SUladzislau Rezki (Sony) return va;
120196e2db45SUladzislau Rezki (Sony) }
120296e2db45SUladzislau Rezki (Sony)
120396e2db45SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
12048eb510dbSUladzislau Rezki (Sony) merge_or_add_vmap_area(struct vmap_area *va,
12058eb510dbSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head)
12068eb510dbSUladzislau Rezki (Sony) {
12078eb510dbSUladzislau Rezki (Sony) return __merge_or_add_vmap_area(va, root, head, false);
12088eb510dbSUladzislau Rezki (Sony) }
12098eb510dbSUladzislau Rezki (Sony)
12108eb510dbSUladzislau Rezki (Sony) static __always_inline struct vmap_area *
121196e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(struct vmap_area *va,
121296e2db45SUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head)
121396e2db45SUladzislau Rezki (Sony) {
12148eb510dbSUladzislau Rezki (Sony) va = __merge_or_add_vmap_area(va, root, head, true);
121596e2db45SUladzislau Rezki (Sony) if (va)
12165dd78640SUladzislau Rezki (Sony) augment_tree_propagate_from(va);
121796e2db45SUladzislau Rezki (Sony)
12183c5c3cfbSDaniel Axtens return va;
121968ad4a33SUladzislau Rezki (Sony) }
122068ad4a33SUladzislau Rezki (Sony)
122168ad4a33SUladzislau Rezki (Sony) static __always_inline bool
122268ad4a33SUladzislau Rezki (Sony) is_within_this_va(struct vmap_area *va, unsigned long size,
122368ad4a33SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart)
122468ad4a33SUladzislau Rezki (Sony) {
122568ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr;
122668ad4a33SUladzislau Rezki (Sony)
122768ad4a33SUladzislau Rezki (Sony) if (va->va_start > vstart)
122868ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align);
122968ad4a33SUladzislau Rezki (Sony) else
123068ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align);
123168ad4a33SUladzislau Rezki (Sony)
123268ad4a33SUladzislau Rezki (Sony) /* Can be overflowed due to big size or alignment. */
123368ad4a33SUladzislau Rezki (Sony) if (nva_start_addr + size < nva_start_addr ||
123468ad4a33SUladzislau Rezki (Sony) nva_start_addr < vstart)
123568ad4a33SUladzislau Rezki (Sony) return false;
123668ad4a33SUladzislau Rezki (Sony)
123768ad4a33SUladzislau Rezki (Sony) return (nva_start_addr + size <= va->va_end);
123868ad4a33SUladzislau Rezki (Sony) }
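/*
 * Editor's note (illustrative, hypothetical values): the overflow check
 * above matters for requests near the top of the address space. For
 * example, with vstart close to ULONG_MAX, ALIGN() or the subsequent
 * "nva_start_addr + size" can wrap past zero; without the check such a
 * wrapped value could spuriously compare as fitting below va->va_end.
 */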
123968ad4a33SUladzislau Rezki (Sony)
124068ad4a33SUladzislau Rezki (Sony) /*
124168ad4a33SUladzislau Rezki (Sony) * Find the first free block (lowest start address) in the tree
124268ad4a33SUladzislau Rezki (Sony) * that can satisfy the request described by the passed
12439333fe98SUladzislau Rezki * parameters. Please note, with an alignment bigger than PAGE_SIZE,
12449333fe98SUladzislau Rezki * the search length is adjusted to account for worst case alignment
12459333fe98SUladzislau Rezki * overhead.
124668ad4a33SUladzislau Rezki (Sony) */
124768ad4a33SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
1248f9863be4SUladzislau Rezki (Sony) find_vmap_lowest_match(struct rb_root *root, unsigned long size,
1249f9863be4SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart, bool adjust_search_size)
125068ad4a33SUladzislau Rezki (Sony) {
125168ad4a33SUladzislau Rezki (Sony) struct vmap_area *va;
125268ad4a33SUladzislau Rezki (Sony) struct rb_node *node;
12539333fe98SUladzislau Rezki unsigned long length;
125468ad4a33SUladzislau Rezki (Sony)
125568ad4a33SUladzislau Rezki (Sony) /* Start from the root. */
1256f9863be4SUladzislau Rezki (Sony) node = root->rb_node;
125768ad4a33SUladzislau Rezki (Sony)
12589333fe98SUladzislau Rezki /* Adjust the search size for alignment overhead. */
12599333fe98SUladzislau Rezki length = adjust_search_size ? size + align - 1 : size;
12609333fe98SUladzislau Rezki
126168ad4a33SUladzislau Rezki (Sony) while (node) {
126268ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node);
126368ad4a33SUladzislau Rezki (Sony)
12649333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_left) >= length &&
126568ad4a33SUladzislau Rezki (Sony) vstart < va->va_start) {
126668ad4a33SUladzislau Rezki (Sony) node = node->rb_left;
126768ad4a33SUladzislau Rezki (Sony) } else {
126868ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart))
126968ad4a33SUladzislau Rezki (Sony) return va;
127068ad4a33SUladzislau Rezki (Sony)
127168ad4a33SUladzislau Rezki (Sony) /*
127268ad4a33SUladzislau Rezki (Sony) * Does not make sense to go deeper towards the right
127368ad4a33SUladzislau Rezki (Sony) * sub-tree if it does not have a free block that is
12749333fe98SUladzislau Rezki * equal to or bigger than the requested search length.
127568ad4a33SUladzislau Rezki (Sony) */
12769333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_right) >= length) {
127768ad4a33SUladzislau Rezki (Sony) node = node->rb_right;
127868ad4a33SUladzislau Rezki (Sony) continue;
127968ad4a33SUladzislau Rezki (Sony) }
128068ad4a33SUladzislau Rezki (Sony)
128168ad4a33SUladzislau Rezki (Sony) /*
12823806b041SAndrew Morton * OK. We roll back and find the first right sub-tree
128368ad4a33SUladzislau Rezki (Sony) * that will satisfy the search criteria. It can happen
12849f531973SUladzislau Rezki (Sony) * due to the "vstart" restriction or an alignment overhead
12859f531973SUladzislau Rezki (Sony) * that is bigger than PAGE_SIZE.
128668ad4a33SUladzislau Rezki (Sony) */
128768ad4a33SUladzislau Rezki (Sony) while ((node = rb_parent(node))) {
128868ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node);
128968ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart))
129068ad4a33SUladzislau Rezki (Sony) return va;
129168ad4a33SUladzislau Rezki (Sony)
12929333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_right) >= length &&
129368ad4a33SUladzislau Rezki (Sony) vstart <= va->va_start) {
12949f531973SUladzislau Rezki (Sony) /*
12959f531973SUladzislau Rezki (Sony) * Shift the vstart forward. Please note, we update it with
12969f531973SUladzislau Rezki (Sony) * the parent's start address plus "1" because we do not want
12979f531973SUladzislau Rezki (Sony) * to enter the same sub-tree again after it has already been
12989f531973SUladzislau Rezki (Sony) * checked and no suitable free block was found there.
12999f531973SUladzislau Rezki (Sony) */
13009f531973SUladzislau Rezki (Sony) vstart = va->va_start + 1;
130168ad4a33SUladzislau Rezki (Sony) node = node->rb_right;
130268ad4a33SUladzislau Rezki (Sony) break;
130368ad4a33SUladzislau Rezki (Sony) }
130468ad4a33SUladzislau Rezki (Sony) }
130568ad4a33SUladzislau Rezki (Sony) }
130668ad4a33SUladzislau Rezki (Sony) }
130768ad4a33SUladzislau Rezki (Sony)
130868ad4a33SUladzislau Rezki (Sony) return NULL;
130968ad4a33SUladzislau Rezki (Sony) }
131068ad4a33SUladzislau Rezki (Sony)
1311a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1312a6cf4e0fSUladzislau Rezki (Sony) #include <linux/random.h>
1313a6cf4e0fSUladzislau Rezki (Sony)
1314a6cf4e0fSUladzislau Rezki (Sony) static struct vmap_area *
1315bd1264c3SSong Liu find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
1316a6cf4e0fSUladzislau Rezki (Sony) unsigned long align, unsigned long vstart)
1317a6cf4e0fSUladzislau Rezki (Sony) {
1318a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va;
1319a6cf4e0fSUladzislau Rezki (Sony)
1320bd1264c3SSong Liu list_for_each_entry(va, head, list) {
1321a6cf4e0fSUladzislau Rezki (Sony) if (!is_within_this_va(va, size, align, vstart))
1322a6cf4e0fSUladzislau Rezki (Sony) continue;
1323a6cf4e0fSUladzislau Rezki (Sony)
1324a6cf4e0fSUladzislau Rezki (Sony) return va;
1325a6cf4e0fSUladzislau Rezki (Sony) }
1326a6cf4e0fSUladzislau Rezki (Sony)
1327a6cf4e0fSUladzislau Rezki (Sony) return NULL;
1328a6cf4e0fSUladzislau Rezki (Sony) }
1329a6cf4e0fSUladzislau Rezki (Sony)
1330a6cf4e0fSUladzislau Rezki (Sony) static void
1331bd1264c3SSong Liu find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
1332bd1264c3SSong Liu unsigned long size, unsigned long align)
1333a6cf4e0fSUladzislau Rezki (Sony) {
1334a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va_1, *va_2;
1335a6cf4e0fSUladzislau Rezki (Sony) unsigned long vstart;
1336a6cf4e0fSUladzislau Rezki (Sony) unsigned int rnd;
1337a6cf4e0fSUladzislau Rezki (Sony)
1338a6cf4e0fSUladzislau Rezki (Sony) get_random_bytes(&rnd, sizeof(rnd));
1339a6cf4e0fSUladzislau Rezki (Sony) vstart = VMALLOC_START + rnd;
1340a6cf4e0fSUladzislau Rezki (Sony)
1341bd1264c3SSong Liu va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
1342bd1264c3SSong Liu va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
1343a6cf4e0fSUladzislau Rezki (Sony)
1344a6cf4e0fSUladzislau Rezki (Sony) if (va_1 != va_2)
1345a6cf4e0fSUladzislau Rezki (Sony) pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1346a6cf4e0fSUladzislau Rezki (Sony) va_1, va_2, vstart);
1347a6cf4e0fSUladzislau Rezki (Sony) }
1348a6cf4e0fSUladzislau Rezki (Sony) #endif
1349a6cf4e0fSUladzislau Rezki (Sony)
135068ad4a33SUladzislau Rezki (Sony) enum fit_type {
135168ad4a33SUladzislau Rezki (Sony) NOTHING_FIT = 0,
135268ad4a33SUladzislau Rezki (Sony) FL_FIT_TYPE = 1, /* full fit */
135368ad4a33SUladzislau Rezki (Sony) LE_FIT_TYPE = 2, /* left edge fit */
135468ad4a33SUladzislau Rezki (Sony) RE_FIT_TYPE = 3, /* right edge fit */
135568ad4a33SUladzislau Rezki (Sony) NE_FIT_TYPE = 4 /* no edge fit */
135668ad4a33SUladzislau Rezki (Sony) };
135768ad4a33SUladzislau Rezki (Sony)
135868ad4a33SUladzislau Rezki (Sony) static __always_inline enum fit_type
135968ad4a33SUladzislau Rezki (Sony) classify_va_fit_type(struct vmap_area *va,
136068ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr, unsigned long size)
136168ad4a33SUladzislau Rezki (Sony) {
136268ad4a33SUladzislau Rezki (Sony) enum fit_type type;
136368ad4a33SUladzislau Rezki (Sony)
136468ad4a33SUladzislau Rezki (Sony) /* Check if it is within VA. */
136568ad4a33SUladzislau Rezki (Sony) if (nva_start_addr < va->va_start ||
136668ad4a33SUladzislau Rezki (Sony) nva_start_addr + size > va->va_end)
136768ad4a33SUladzislau Rezki (Sony) return NOTHING_FIT;
136868ad4a33SUladzislau Rezki (Sony)
136968ad4a33SUladzislau Rezki (Sony) /* Now classify. */
137068ad4a33SUladzislau Rezki (Sony) if (va->va_start == nva_start_addr) {
137168ad4a33SUladzislau Rezki (Sony) if (va->va_end == nva_start_addr + size)
137268ad4a33SUladzislau Rezki (Sony) type = FL_FIT_TYPE;
137368ad4a33SUladzislau Rezki (Sony) else
137468ad4a33SUladzislau Rezki (Sony) type = LE_FIT_TYPE;
137568ad4a33SUladzislau Rezki (Sony) } else if (va->va_end == nva_start_addr + size) {
137668ad4a33SUladzislau Rezki (Sony) type = RE_FIT_TYPE;
137768ad4a33SUladzislau Rezki (Sony) } else {
137868ad4a33SUladzislau Rezki (Sony) type = NE_FIT_TYPE;
137968ad4a33SUladzislau Rezki (Sony) }
138068ad4a33SUladzislau Rezki (Sony)
138168ad4a33SUladzislau Rezki (Sony) return type;
138268ad4a33SUladzislau Rezki (Sony) }
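/*
 * Editor's worked example (hypothetical addresses): for a free VA
 * spanning [0x1000, 0x5000), classify_va_fit_type() returns:
 *
 *   nva_start_addr == 0x1000, size == 0x4000  ->  FL_FIT_TYPE
 *   nva_start_addr == 0x1000, size == 0x1000  ->  LE_FIT_TYPE
 *   nva_start_addr == 0x4000, size == 0x1000  ->  RE_FIT_TYPE
 *   nva_start_addr == 0x2000, size == 0x1000  ->  NE_FIT_TYPE
 *   nva_start_addr == 0x4800, size == 0x1000  ->  NOTHING_FIT
 */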
138368ad4a33SUladzislau Rezki (Sony)
138468ad4a33SUladzislau Rezki (Sony) static __always_inline int
1385f9863be4SUladzislau Rezki (Sony) adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
1386f9863be4SUladzislau Rezki (Sony) struct vmap_area *va, unsigned long nva_start_addr,
1387f9863be4SUladzislau Rezki (Sony) unsigned long size)
138868ad4a33SUladzislau Rezki (Sony) {
13892c929233SArnd Bergmann struct vmap_area *lva = NULL;
13901b23ff80SBaoquan He enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
139168ad4a33SUladzislau Rezki (Sony)
139268ad4a33SUladzislau Rezki (Sony) if (type == FL_FIT_TYPE) {
139368ad4a33SUladzislau Rezki (Sony) /*
139468ad4a33SUladzislau Rezki (Sony) * No need to split VA, it fully fits.
139568ad4a33SUladzislau Rezki (Sony) *
139668ad4a33SUladzislau Rezki (Sony) * | |
139768ad4a33SUladzislau Rezki (Sony) * V NVA V
139868ad4a33SUladzislau Rezki (Sony) * |---------------|
139968ad4a33SUladzislau Rezki (Sony) */
1400f9863be4SUladzislau Rezki (Sony) unlink_va_augment(va, root);
140168ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
140268ad4a33SUladzislau Rezki (Sony) } else if (type == LE_FIT_TYPE) {
140368ad4a33SUladzislau Rezki (Sony) /*
140468ad4a33SUladzislau Rezki (Sony) * Split left edge of fit VA.
140568ad4a33SUladzislau Rezki (Sony) *
140668ad4a33SUladzislau Rezki (Sony) * | |
140768ad4a33SUladzislau Rezki (Sony) * V NVA V R
140868ad4a33SUladzislau Rezki (Sony) * |-------|-------|
140968ad4a33SUladzislau Rezki (Sony) */
141068ad4a33SUladzislau Rezki (Sony) va->va_start += size;
141168ad4a33SUladzislau Rezki (Sony) } else if (type == RE_FIT_TYPE) {
141268ad4a33SUladzislau Rezki (Sony) /*
141368ad4a33SUladzislau Rezki (Sony) * Split right edge of fit VA.
141468ad4a33SUladzislau Rezki (Sony) *
141568ad4a33SUladzislau Rezki (Sony) * | |
141668ad4a33SUladzislau Rezki (Sony) * L V NVA V
141768ad4a33SUladzislau Rezki (Sony) * |-------|-------|
141868ad4a33SUladzislau Rezki (Sony) */
141968ad4a33SUladzislau Rezki (Sony) va->va_end = nva_start_addr;
142068ad4a33SUladzislau Rezki (Sony) } else if (type == NE_FIT_TYPE) {
142168ad4a33SUladzislau Rezki (Sony) /*
142268ad4a33SUladzislau Rezki (Sony) * Split no edge of fit VA.
142368ad4a33SUladzislau Rezki (Sony) *
142468ad4a33SUladzislau Rezki (Sony) * | |
142568ad4a33SUladzislau Rezki (Sony) * L V NVA V R
142668ad4a33SUladzislau Rezki (Sony) * |---|-------|---|
142768ad4a33SUladzislau Rezki (Sony) */
142882dd23e8SUladzislau Rezki (Sony) lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
142982dd23e8SUladzislau Rezki (Sony) if (unlikely(!lva)) {
143082dd23e8SUladzislau Rezki (Sony) /*
143182dd23e8SUladzislau Rezki (Sony) * For the percpu allocator we do not do any pre-allocation
143282dd23e8SUladzislau Rezki (Sony) * and leave it as it is. The reason is that it most likely
143382dd23e8SUladzislau Rezki (Sony) * never ends up with NE_FIT_TYPE splitting. In case of
143482dd23e8SUladzislau Rezki (Sony) * percpu allocations, offsets and sizes are aligned to a
143582dd23e8SUladzislau Rezki (Sony) * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
143682dd23e8SUladzislau Rezki (Sony) * are its main fitting cases.
143782dd23e8SUladzislau Rezki (Sony) *
143882dd23e8SUladzislau Rezki (Sony) * There are a few exceptions though, for example the
143982dd23e8SUladzislau Rezki (Sony) * first allocation (early boot up) when we have "one"
144082dd23e8SUladzislau Rezki (Sony) * big free space that has to be split.
1441060650a2SUladzislau Rezki (Sony) *
1442060650a2SUladzislau Rezki (Sony) * Also we can hit this path in case of regular "vmap"
1443060650a2SUladzislau Rezki (Sony) * allocations, if "this" current CPU was not preloaded.
1444060650a2SUladzislau Rezki (Sony) * See the comment in alloc_vmap_area() for why. If so,
1445060650a2SUladzislau Rezki (Sony) * GFP_NOWAIT is used instead to get an extra object for
1446060650a2SUladzislau Rezki (Sony) * splitting purposes. That is rare and most of the time
1447060650a2SUladzislau Rezki (Sony) * does not occur.
1448060650a2SUladzislau Rezki (Sony) *
1449060650a2SUladzislau Rezki (Sony) * What happens if an allocation fails? Basically,
1450060650a2SUladzislau Rezki (Sony) * an "overflow" path is triggered to purge lazily freed
1451060650a2SUladzislau Rezki (Sony) * areas to free some memory, then the "retry" path is
1452060650a2SUladzislau Rezki (Sony) * triggered to try one more time. See more details
1453060650a2SUladzislau Rezki (Sony) * in the alloc_vmap_area() function.
145482dd23e8SUladzislau Rezki (Sony) */
145568ad4a33SUladzislau Rezki (Sony) lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
145682dd23e8SUladzislau Rezki (Sony) if (!lva)
145768ad4a33SUladzislau Rezki (Sony) return -1;
145882dd23e8SUladzislau Rezki (Sony) }
145968ad4a33SUladzislau Rezki (Sony)
146068ad4a33SUladzislau Rezki (Sony) /*
146168ad4a33SUladzislau Rezki (Sony) * Build the remainder.
146268ad4a33SUladzislau Rezki (Sony) */
146368ad4a33SUladzislau Rezki (Sony) lva->va_start = va->va_start;
146468ad4a33SUladzislau Rezki (Sony) lva->va_end = nva_start_addr;
146568ad4a33SUladzislau Rezki (Sony)
146668ad4a33SUladzislau Rezki (Sony) /*
146768ad4a33SUladzislau Rezki (Sony) * Shrink this VA to remaining size.
146868ad4a33SUladzislau Rezki (Sony) */
146968ad4a33SUladzislau Rezki (Sony) va->va_start = nva_start_addr + size;
147068ad4a33SUladzislau Rezki (Sony) } else {
147168ad4a33SUladzislau Rezki (Sony) return -1;
147268ad4a33SUladzislau Rezki (Sony) }
147368ad4a33SUladzislau Rezki (Sony)
147468ad4a33SUladzislau Rezki (Sony) if (type != FL_FIT_TYPE) {
147568ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va);
147668ad4a33SUladzislau Rezki (Sony)
14772c929233SArnd Bergmann if (lva) /* type == NE_FIT_TYPE */
1478f9863be4SUladzislau Rezki (Sony) insert_vmap_area_augment(lva, &va->rb_node, root, head);
147968ad4a33SUladzislau Rezki (Sony) }
148068ad4a33SUladzislau Rezki (Sony)
148168ad4a33SUladzislau Rezki (Sony) return 0;
148268ad4a33SUladzislau Rezki (Sony) }
148368ad4a33SUladzislau Rezki (Sony)
148468ad4a33SUladzislau Rezki (Sony) /*
148568ad4a33SUladzislau Rezki (Sony) * Returns a start address of the newly allocated area, if success.
148668ad4a33SUladzislau Rezki (Sony) * Otherwise a vend is returned that indicates failure.
148768ad4a33SUladzislau Rezki (Sony) */
148868ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
1489f9863be4SUladzislau Rezki (Sony) __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1490f9863be4SUladzislau Rezki (Sony) unsigned long size, unsigned long align,
1491cacca6baSUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend)
149268ad4a33SUladzislau Rezki (Sony) {
14939333fe98SUladzislau Rezki bool adjust_search_size = true;
149468ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr;
149568ad4a33SUladzislau Rezki (Sony) struct vmap_area *va;
149668ad4a33SUladzislau Rezki (Sony) int ret;
149768ad4a33SUladzislau Rezki (Sony)
14989333fe98SUladzislau Rezki /*
14999333fe98SUladzislau Rezki * Do not adjust when:
15009333fe98SUladzislau Rezki * a) align <= PAGE_SIZE, because it does not make any sense.
15019333fe98SUladzislau Rezki * All blocks (their start addresses) are at least PAGE_SIZE
15029333fe98SUladzislau Rezki * aligned anyway;
15039333fe98SUladzislau Rezki * b) a short range where the requested size exactly matches the
15049333fe98SUladzislau Rezki * specified [vstart:vend] interval and the alignment > PAGE_SIZE.
15059333fe98SUladzislau Rezki * With an adjusted search length such an allocation would not succeed.
15069333fe98SUladzislau Rezki */
15079333fe98SUladzislau Rezki if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
15089333fe98SUladzislau Rezki adjust_search_size = false;
15099333fe98SUladzislau Rezki
1510f9863be4SUladzislau Rezki (Sony) va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
151168ad4a33SUladzislau Rezki (Sony) if (unlikely(!va))
151268ad4a33SUladzislau Rezki (Sony) return vend;
151368ad4a33SUladzislau Rezki (Sony)
151468ad4a33SUladzislau Rezki (Sony) if (va->va_start > vstart)
151568ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align);
151668ad4a33SUladzislau Rezki (Sony) else
151768ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align);
151868ad4a33SUladzislau Rezki (Sony)
151968ad4a33SUladzislau Rezki (Sony) /* Check the "vend" restriction. */
152068ad4a33SUladzislau Rezki (Sony) if (nva_start_addr + size > vend)
152168ad4a33SUladzislau Rezki (Sony) return vend;
152268ad4a33SUladzislau Rezki (Sony)
152368ad4a33SUladzislau Rezki (Sony) /* Update the free vmap_area. */
1524f9863be4SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
15251b23ff80SBaoquan He if (WARN_ON_ONCE(ret))
152668ad4a33SUladzislau Rezki (Sony) return vend;
152768ad4a33SUladzislau Rezki (Sony)
1528a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1529bd1264c3SSong Liu find_vmap_lowest_match_check(root, head, size, align);
1530a6cf4e0fSUladzislau Rezki (Sony) #endif
1531a6cf4e0fSUladzislau Rezki (Sony)
153268ad4a33SUladzislau Rezki (Sony) return nva_start_addr;
153368ad4a33SUladzislau Rezki (Sony) }
15344da56b99SChris Wilson
1535db64fe02SNick Piggin /*
1536d98c9e83SAndrey Ryabinin * Free a region of KVA allocated by alloc_vmap_area
1537d98c9e83SAndrey Ryabinin */
1538d98c9e83SAndrey Ryabinin static void free_vmap_area(struct vmap_area *va)
1539d98c9e83SAndrey Ryabinin {
1540d98c9e83SAndrey Ryabinin /*
1541d98c9e83SAndrey Ryabinin * Remove from the busy tree/list.
1542d98c9e83SAndrey Ryabinin */
1543d98c9e83SAndrey Ryabinin spin_lock(&vmap_area_lock);
1544d98c9e83SAndrey Ryabinin unlink_va(va, &vmap_area_root);
1545d98c9e83SAndrey Ryabinin spin_unlock(&vmap_area_lock);
1546d98c9e83SAndrey Ryabinin
1547d98c9e83SAndrey Ryabinin /*
1548d98c9e83SAndrey Ryabinin * Insert/Merge it back to the free tree/list.
1549d98c9e83SAndrey Ryabinin */
1550d98c9e83SAndrey Ryabinin spin_lock(&free_vmap_area_lock);
155196e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1552d98c9e83SAndrey Ryabinin spin_unlock(&free_vmap_area_lock);
1553d98c9e83SAndrey Ryabinin }
1554d98c9e83SAndrey Ryabinin
1555187f8cc4SUladzislau Rezki (Sony) static inline void
1556187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1557187f8cc4SUladzislau Rezki (Sony) {
1558187f8cc4SUladzislau Rezki (Sony) struct vmap_area *va = NULL;
1559187f8cc4SUladzislau Rezki (Sony)
1560187f8cc4SUladzislau Rezki (Sony) /*
1561187f8cc4SUladzislau Rezki (Sony) * Preload this CPU with one extra vmap_area object. It is used
1562187f8cc4SUladzislau Rezki (Sony) * when the fit type of a free area is NE_FIT_TYPE. It guarantees
1563187f8cc4SUladzislau Rezki (Sony) * that a CPU that does an allocation is preloaded.
1564187f8cc4SUladzislau Rezki (Sony) *
1565187f8cc4SUladzislau Rezki (Sony) * We do it in a non-atomic context, which allows us to use more
1566187f8cc4SUladzislau Rezki (Sony) * permissive allocation masks and to be more stable under low
1567187f8cc4SUladzislau Rezki (Sony) * memory conditions and high memory pressure.
1568187f8cc4SUladzislau Rezki (Sony) */
1569187f8cc4SUladzislau Rezki (Sony) if (!this_cpu_read(ne_fit_preload_node))
1570187f8cc4SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1571187f8cc4SUladzislau Rezki (Sony)
1572187f8cc4SUladzislau Rezki (Sony) spin_lock(lock);
1573187f8cc4SUladzislau Rezki (Sony)
1574187f8cc4SUladzislau Rezki (Sony) if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1575187f8cc4SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
1576187f8cc4SUladzislau Rezki (Sony) }
1577187f8cc4SUladzislau Rezki (Sony)
1578d98c9e83SAndrey Ryabinin /*
1579db64fe02SNick Piggin * Allocate a region of KVA of the specified size and alignment, within the
1580db64fe02SNick Piggin * vstart and vend.
1581db64fe02SNick Piggin */
1582db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size,
1583db64fe02SNick Piggin unsigned long align,
1584db64fe02SNick Piggin unsigned long vstart, unsigned long vend,
1585869176a0SBaoquan He int node, gfp_t gfp_mask,
1586869176a0SBaoquan He unsigned long va_flags)
1587db64fe02SNick Piggin {
1588187f8cc4SUladzislau Rezki (Sony) struct vmap_area *va;
158912e376a6SUladzislau Rezki (Sony) unsigned long freed;
15901da177e4SLinus Torvalds unsigned long addr;
1591db64fe02SNick Piggin int purged = 0;
1592d98c9e83SAndrey Ryabinin int ret;
1593db64fe02SNick Piggin
15947e4a32c0SHyunmin Lee if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
15957e4a32c0SHyunmin Lee return ERR_PTR(-EINVAL);
1596db64fe02SNick Piggin
159768ad4a33SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized))
159868ad4a33SUladzislau Rezki (Sony) return ERR_PTR(-EBUSY);
159968ad4a33SUladzislau Rezki (Sony)
16005803ed29SChristoph Hellwig might_sleep();
1601f07116d7SUladzislau Rezki (Sony) gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
16024da56b99SChris Wilson
1603f07116d7SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1604db64fe02SNick Piggin if (unlikely(!va))
1605db64fe02SNick Piggin return ERR_PTR(-ENOMEM);
1606db64fe02SNick Piggin
16077f88f88fSCatalin Marinas /*
16087f88f88fSCatalin Marinas * Only scan the relevant parts containing pointers to other objects
16097f88f88fSCatalin Marinas * to avoid false negatives.
16107f88f88fSCatalin Marinas */
1611f07116d7SUladzislau Rezki (Sony) kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
16127f88f88fSCatalin Marinas
1613db64fe02SNick Piggin retry:
1614187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1615f9863be4SUladzislau Rezki (Sony) addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
1616f9863be4SUladzislau Rezki (Sony) size, align, vstart, vend);
1617187f8cc4SUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock);
161868ad4a33SUladzislau Rezki (Sony)
1619cf243da6SUladzislau Rezki (Sony) trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
1620cf243da6SUladzislau Rezki (Sony)
162189699605SNick Piggin /*
162268ad4a33SUladzislau Rezki (Sony) * If an allocation fails, the "vend" address is
162368ad4a33SUladzislau Rezki (Sony) * returned. Therefore trigger the overflow path.
162489699605SNick Piggin */
162568ad4a33SUladzislau Rezki (Sony) if (unlikely(addr == vend))
162689699605SNick Piggin goto overflow;
162789699605SNick Piggin
162889699605SNick Piggin va->va_start = addr;
162989699605SNick Piggin va->va_end = addr + size;
1630688fcbfcSPengfei Li va->vm = NULL;
1631869176a0SBaoquan He va->flags = va_flags;
163268ad4a33SUladzislau Rezki (Sony)
1633e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock);
1634e36176beSUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
163589699605SNick Piggin spin_unlock(&vmap_area_lock);
163689699605SNick Piggin
163761e16557SWang Xiaoqiang BUG_ON(!IS_ALIGNED(va->va_start, align));
163889699605SNick Piggin BUG_ON(va->va_start < vstart);
163989699605SNick Piggin BUG_ON(va->va_end > vend);
164089699605SNick Piggin
1641d98c9e83SAndrey Ryabinin ret = kasan_populate_vmalloc(addr, size);
1642d98c9e83SAndrey Ryabinin if (ret) {
1643d98c9e83SAndrey Ryabinin free_vmap_area(va);
1644d98c9e83SAndrey Ryabinin return ERR_PTR(ret);
1645d98c9e83SAndrey Ryabinin }
1646d98c9e83SAndrey Ryabinin
164789699605SNick Piggin return va;
164889699605SNick Piggin
16497766970cSNick Piggin overflow:
1650db64fe02SNick Piggin if (!purged) {
165177e50af0SThomas Gleixner reclaim_and_purge_vmap_areas();
1652db64fe02SNick Piggin purged = 1;
1653db64fe02SNick Piggin goto retry;
1654db64fe02SNick Piggin }
16554da56b99SChris Wilson
165612e376a6SUladzislau Rezki (Sony) freed = 0;
16574da56b99SChris Wilson blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
165812e376a6SUladzislau Rezki (Sony)
16594da56b99SChris Wilson if (freed > 0) {
16604da56b99SChris Wilson purged = 0;
16614da56b99SChris Wilson goto retry;
16624da56b99SChris Wilson }
16634da56b99SChris Wilson
166403497d76SFlorian Fainelli if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1665756a025fSJoe Perches pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1666756a025fSJoe Perches size);
166768ad4a33SUladzislau Rezki (Sony)
166868ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
1669db64fe02SNick Piggin return ERR_PTR(-EBUSY);
1670db64fe02SNick Piggin }
1671db64fe02SNick Piggin
16724da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb)
16734da56b99SChris Wilson {
16744da56b99SChris Wilson return blocking_notifier_chain_register(&vmap_notify_list, nb);
16754da56b99SChris Wilson }
16764da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
16774da56b99SChris Wilson
16784da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb)
16794da56b99SChris Wilson {
16804da56b99SChris Wilson return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
16814da56b99SChris Wilson }
16824da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
16834da56b99SChris Wilson
1684db64fe02SNick Piggin /*
1685db64fe02SNick Piggin * lazy_max_pages is the maximum amount of virtual address space we gather up
1686db64fe02SNick Piggin * before attempting to purge with a TLB flush.
1687db64fe02SNick Piggin *
1688db64fe02SNick Piggin * There is a tradeoff here: a larger number will cover more kernel page tables
1689db64fe02SNick Piggin * and take slightly longer to purge, but it will linearly reduce the number of
1690db64fe02SNick Piggin * global TLB flushes that must be performed. It would seem natural to scale
1691db64fe02SNick Piggin * this number up linearly with the number of CPUs (because vmapping activity
1692db64fe02SNick Piggin * could also scale linearly with the number of CPUs), however it is likely
1693db64fe02SNick Piggin * that in practice, workloads might be constrained in other ways that mean
1694db64fe02SNick Piggin * vmap activity will not scale linearly with CPUs. Also, I want to be
1695db64fe02SNick Piggin * conservative and not introduce a big latency on huge systems, so go with
1696db64fe02SNick Piggin * a less aggressive log scale. It will still be an improvement over the old
1697db64fe02SNick Piggin * code, and it will be simple to change the scale factor if we find that it
1698db64fe02SNick Piggin * becomes a problem on bigger systems.
1699db64fe02SNick Piggin */
1700db64fe02SNick Piggin static unsigned long lazy_max_pages(void)
1701db64fe02SNick Piggin {
1702db64fe02SNick Piggin unsigned int log;
1703db64fe02SNick Piggin
1704db64fe02SNick Piggin log = fls(num_online_cpus());
1705db64fe02SNick Piggin
1706db64fe02SNick Piggin return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1707db64fe02SNick Piggin }
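/*
 * Editor's worked example (illustrative, assuming 4K pages): with 8
 * online CPUs, fls(8) == 4, so lazy_max_pages() returns
 * 4 * (32MB / 4KB) = 32768 pages, i.e. roughly 128MB of lazily freed
 * VA space may accumulate before purging kicks in.
 */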
1708db64fe02SNick Piggin
17094d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1710db64fe02SNick Piggin
17110574ecd1SChristoph Hellwig /*
1712f0953a1bSIngo Molnar * Serialize vmap purging. There is no actual critical section protected
1713153090f2SBaoquan He * by this lock, but we want to avoid concurrent calls for performance
17140574ecd1SChristoph Hellwig * reasons and to make pcpu_get_vm_areas() more deterministic.
17150574ecd1SChristoph Hellwig */
1716f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock);
17170574ecd1SChristoph Hellwig
171802b709dfSNick Piggin /* for per-CPU blocks */
171902b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void);
172002b709dfSNick Piggin
17213ee48b6aSCliff Wickman /*
1722db64fe02SNick Piggin * Purges all lazily-freed vmap areas.
1723db64fe02SNick Piggin */
17240574ecd1SChristoph Hellwig static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1725db64fe02SNick Piggin {
17264d36e6f8SUladzislau Rezki (Sony) unsigned long resched_threshold;
17276030fd5fSUladzislau Rezki (Sony) unsigned int num_purged_areas = 0;
1728baa468a6SBaoquan He struct list_head local_purge_list;
172996e2db45SUladzislau Rezki (Sony) struct vmap_area *va, *n_va;
1730db64fe02SNick Piggin
17310574ecd1SChristoph Hellwig lockdep_assert_held(&vmap_purge_lock);
173202b709dfSNick Piggin
173396e2db45SUladzislau Rezki (Sony) spin_lock(&purge_vmap_area_lock);
173496e2db45SUladzislau Rezki (Sony) purge_vmap_area_root = RB_ROOT;
1735baa468a6SBaoquan He list_replace_init(&purge_vmap_area_list, &local_purge_list);
173696e2db45SUladzislau Rezki (Sony) spin_unlock(&purge_vmap_area_lock);
173796e2db45SUladzislau Rezki (Sony)
1738baa468a6SBaoquan He if (unlikely(list_empty(&local_purge_list)))
17396030fd5fSUladzislau Rezki (Sony) goto out;
174068571be9SUladzislau Rezki (Sony)
174196e2db45SUladzislau Rezki (Sony) start = min(start,
1742baa468a6SBaoquan He list_first_entry(&local_purge_list,
174396e2db45SUladzislau Rezki (Sony) struct vmap_area, list)->va_start);
174496e2db45SUladzislau Rezki (Sony)
174596e2db45SUladzislau Rezki (Sony) end = max(end,
1746baa468a6SBaoquan He list_last_entry(&local_purge_list,
174796e2db45SUladzislau Rezki (Sony) struct vmap_area, list)->va_end);
1748db64fe02SNick Piggin
17490574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end);
17504d36e6f8SUladzislau Rezki (Sony) resched_threshold = lazy_max_pages() << 1;
1751db64fe02SNick Piggin
1752e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock);
1753baa468a6SBaoquan He list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
17544d36e6f8SUladzislau Rezki (Sony) unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
17553c5c3cfbSDaniel Axtens unsigned long orig_start = va->va_start;
17563c5c3cfbSDaniel Axtens unsigned long orig_end = va->va_end;
1757763b218dSJoel Fernandes
1758dd3b8353SUladzislau Rezki (Sony) /*
1759dd3b8353SUladzislau Rezki (Sony) * Finally, insert or merge the lazily-freed area. It is
1760dd3b8353SUladzislau Rezki (Sony) * detached and there is no need to "unlink" it from
1761dd3b8353SUladzislau Rezki (Sony) * anything.
1762dd3b8353SUladzislau Rezki (Sony) */
176396e2db45SUladzislau Rezki (Sony) va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
17643c5c3cfbSDaniel Axtens &free_vmap_area_list);
17653c5c3cfbSDaniel Axtens
17669c801f61SUladzislau Rezki (Sony) if (!va)
17679c801f61SUladzislau Rezki (Sony) continue;
17689c801f61SUladzislau Rezki (Sony)
17693c5c3cfbSDaniel Axtens if (is_vmalloc_or_module_addr((void *)orig_start))
17703c5c3cfbSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end,
17713c5c3cfbSDaniel Axtens va->va_start, va->va_end);
1772dd3b8353SUladzislau Rezki (Sony)
17734d36e6f8SUladzislau Rezki (Sony) atomic_long_sub(nr, &vmap_lazy_nr);
17746030fd5fSUladzislau Rezki (Sony) num_purged_areas++;
177568571be9SUladzislau Rezki (Sony)
17764d36e6f8SUladzislau Rezki (Sony) if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1777e36176beSUladzislau Rezki (Sony) cond_resched_lock(&free_vmap_area_lock);
1778763b218dSJoel Fernandes }
1779e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock);
17806030fd5fSUladzislau Rezki (Sony)
17816030fd5fSUladzislau Rezki (Sony) out:
17826030fd5fSUladzislau Rezki (Sony) trace_purge_vmap_area_lazy(start, end, num_purged_areas);
17836030fd5fSUladzislau Rezki (Sony) return num_purged_areas > 0;
1784db64fe02SNick Piggin }
1785db64fe02SNick Piggin
1786db64fe02SNick Piggin /*
178777e50af0SThomas Gleixner * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
1788db64fe02SNick Piggin */
178977e50af0SThomas Gleixner static void reclaim_and_purge_vmap_areas(void)
179077e50af0SThomas Gleixner
1791db64fe02SNick Piggin {
1792f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock);
17930574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus();
17940574ecd1SChristoph Hellwig __purge_vmap_area_lazy(ULONG_MAX, 0);
1795f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock);
1796db64fe02SNick Piggin }
1797db64fe02SNick Piggin
1798690467c8SUladzislau Rezki (Sony) static void drain_vmap_area_work(struct work_struct *work)
1799690467c8SUladzislau Rezki (Sony) {
1800690467c8SUladzislau Rezki (Sony) unsigned long nr_lazy;
1801690467c8SUladzislau Rezki (Sony)
1802690467c8SUladzislau Rezki (Sony) do {
1803690467c8SUladzislau Rezki (Sony) mutex_lock(&vmap_purge_lock);
1804690467c8SUladzislau Rezki (Sony) __purge_vmap_area_lazy(ULONG_MAX, 0);
1805690467c8SUladzislau Rezki (Sony) mutex_unlock(&vmap_purge_lock);
1806690467c8SUladzislau Rezki (Sony)
1807690467c8SUladzislau Rezki (Sony) /* Recheck if further work is required. */
1808690467c8SUladzislau Rezki (Sony) nr_lazy = atomic_long_read(&vmap_lazy_nr);
1809690467c8SUladzislau Rezki (Sony) } while (nr_lazy > lazy_max_pages());
1810690467c8SUladzislau Rezki (Sony) }
1811690467c8SUladzislau Rezki (Sony)
1812db64fe02SNick Piggin /*
1813edd89818SUladzislau Rezki (Sony) * Free a vmap area, the caller ensuring that the area has been unmapped,
1814edd89818SUladzislau Rezki (Sony) * unlinked and that flush_cache_vunmap has been called for the correct
1815edd89818SUladzislau Rezki (Sony) * range previously.
1816db64fe02SNick Piggin */
181764141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va)
1818db64fe02SNick Piggin {
18198c4196feSUladzislau Rezki (Sony) unsigned long nr_lazy_max = lazy_max_pages();
18208c4196feSUladzislau Rezki (Sony) unsigned long va_start = va->va_start;
18214d36e6f8SUladzislau Rezki (Sony) unsigned long nr_lazy;
182280c4bd7aSChris Wilson
1823edd89818SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!list_empty(&va->list)))
1824edd89818SUladzislau Rezki (Sony) return;
1825dd3b8353SUladzislau Rezki (Sony)
18264d36e6f8SUladzislau Rezki (Sony) nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
18274d36e6f8SUladzislau Rezki (Sony) PAGE_SHIFT, &vmap_lazy_nr);
182880c4bd7aSChris Wilson
182996e2db45SUladzislau Rezki (Sony) /*
183096e2db45SUladzislau Rezki (Sony) * Merge or place it into the purge tree/list.
183196e2db45SUladzislau Rezki (Sony) */
183296e2db45SUladzislau Rezki (Sony) spin_lock(&purge_vmap_area_lock);
183396e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area(va,
183496e2db45SUladzislau Rezki (Sony) &purge_vmap_area_root, &purge_vmap_area_list);
183596e2db45SUladzislau Rezki (Sony) spin_unlock(&purge_vmap_area_lock);
183680c4bd7aSChris Wilson
18378c4196feSUladzislau Rezki (Sony) trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
18388c4196feSUladzislau Rezki (Sony)
183996e2db45SUladzislau Rezki (Sony) /* After this point, we may free va at any time */
18408c4196feSUladzislau Rezki (Sony) if (unlikely(nr_lazy > nr_lazy_max))
1841690467c8SUladzislau Rezki (Sony) schedule_work(&drain_vmap_work);
1842db64fe02SNick Piggin }
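/*
 * Editor's worked example (illustrative, assuming 4K pages): lazily
 * freeing a 2MB area adds 512 pages to vmap_lazy_nr. Once the counter
 * exceeds lazy_max_pages() (e.g. 32768 pages on an 8-CPU system, see
 * the example above), drain_vmap_work is scheduled to purge the
 * accumulated areas and flush the TLB in one go.
 */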
1843db64fe02SNick Piggin
1844b29acbdcSNick Piggin /*
1845b29acbdcSNick Piggin * Free and unmap a vmap area
1846b29acbdcSNick Piggin */
1847b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va)
1848b29acbdcSNick Piggin {
1849b29acbdcSNick Piggin flush_cache_vunmap(va->va_start, va->va_end);
18504ad0ae8cSNicholas Piggin vunmap_range_noflush(va->va_start, va->va_end);
18518e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static())
185282a2e924SChintan Pandya flush_tlb_kernel_range(va->va_start, va->va_end);
185382a2e924SChintan Pandya
1854c8eef01eSChristoph Hellwig free_vmap_area_noflush(va);
1855b29acbdcSNick Piggin }
1856b29acbdcSNick Piggin
1857993d0b28SMatthew Wilcox (Oracle) struct vmap_area *find_vmap_area(unsigned long addr)
1858db64fe02SNick Piggin {
1859db64fe02SNick Piggin struct vmap_area *va;
1860db64fe02SNick Piggin
1861db64fe02SNick Piggin spin_lock(&vmap_area_lock);
1862899c6efeSUladzislau Rezki (Sony) va = __find_vmap_area(addr, &vmap_area_root);
1863db64fe02SNick Piggin spin_unlock(&vmap_area_lock);
1864db64fe02SNick Piggin
1865db64fe02SNick Piggin return va;
1866db64fe02SNick Piggin }
1867db64fe02SNick Piggin
1868edd89818SUladzislau Rezki (Sony) static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
1869edd89818SUladzislau Rezki (Sony) {
1870edd89818SUladzislau Rezki (Sony) struct vmap_area *va;
1871edd89818SUladzislau Rezki (Sony)
1872edd89818SUladzislau Rezki (Sony) spin_lock(&vmap_area_lock);
1873edd89818SUladzislau Rezki (Sony) va = __find_vmap_area(addr, &vmap_area_root);
1874edd89818SUladzislau Rezki (Sony) if (va)
1875edd89818SUladzislau Rezki (Sony) unlink_va(va, &vmap_area_root);
1876edd89818SUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock);
1877edd89818SUladzislau Rezki (Sony)
1878edd89818SUladzislau Rezki (Sony) return va;
1879edd89818SUladzislau Rezki (Sony) }
1880edd89818SUladzislau Rezki (Sony)
1881db64fe02SNick Piggin /*** Per cpu kva allocator ***/
1882db64fe02SNick Piggin
1883db64fe02SNick Piggin /*
1884db64fe02SNick Piggin * vmap space is limited especially on 32 bit architectures. Ensure there is
1885db64fe02SNick Piggin * room for at least 16 percpu vmap blocks per CPU.
1886db64fe02SNick Piggin */
1887db64fe02SNick Piggin /*
1888db64fe02SNick Piggin * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1889db64fe02SNick Piggin * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
1890db64fe02SNick Piggin * instead (we just need a rough idea)
1891db64fe02SNick Piggin */
1892db64fe02SNick Piggin #if BITS_PER_LONG == 32
1893db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024)
1894db64fe02SNick Piggin #else
1895db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024*1024)
1896db64fe02SNick Piggin #endif
1897db64fe02SNick Piggin
1898db64fe02SNick Piggin #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
1899db64fe02SNick Piggin #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
1900db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
1901db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
1902db64fe02SNick Piggin #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
1903db64fe02SNick Piggin #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
1904f982f915SClemens Ladisch #define VMAP_BBMAP_BITS \
1905f982f915SClemens Ladisch VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
1906db64fe02SNick Piggin VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
1907f982f915SClemens Ladisch VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1908db64fe02SNick Piggin
1909db64fe02SNick Piggin #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
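/*
 * Editor's worked example (illustrative, assuming a 64-bit kernel, 4K
 * pages and NR_CPUS == 64): VMALLOC_PAGES is 128GB / 4KB = 32M pages,
 * so VMALLOC_PAGES / 64 / 16 = 32768, which the VMAP_MIN/VMAP_MAX
 * clamp reduces to VMAP_BBMAP_BITS_MAX = 1024 bits, giving the 4MB
 * VMAP_BLOCK_SIZE noted in the comments above.
 */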
1910db64fe02SNick Piggin
191177e50af0SThomas Gleixner /*
191277e50af0SThomas Gleixner * Purge threshold to prevent overeager purging of fragmented blocks for
191377e50af0SThomas Gleixner * regular operations: Purge if vb->free is less than 1/4 of the capacity.
191477e50af0SThomas Gleixner */
191577e50af0SThomas Gleixner #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4)
191677e50af0SThomas Gleixner
1917869176a0SBaoquan He #define VMAP_RAM 0x1 /* indicates vm_map_ram area */
1918869176a0SBaoquan He #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type */
1919869176a0SBaoquan He #define VMAP_FLAGS_MASK 0x3
1920869176a0SBaoquan He
1921db64fe02SNick Piggin struct vmap_block_queue {
1922db64fe02SNick Piggin spinlock_t lock;
1923db64fe02SNick Piggin struct list_head free;
1924062eacf5SUladzislau Rezki (Sony)
1925062eacf5SUladzislau Rezki (Sony) /*
1926062eacf5SUladzislau Rezki (Sony) * An xarray requires extra memory to be allocated
1927062eacf5SUladzislau Rezki (Sony) * dynamically. If that is an issue, we can use an
1928062eacf5SUladzislau Rezki (Sony) * rb-tree instead.
1929062eacf5SUladzislau Rezki (Sony) */
1930062eacf5SUladzislau Rezki (Sony) struct xarray vmap_blocks;
1931db64fe02SNick Piggin };
1932db64fe02SNick Piggin
1933db64fe02SNick Piggin struct vmap_block {
1934db64fe02SNick Piggin spinlock_t lock;
1935db64fe02SNick Piggin struct vmap_area *va;
1936db64fe02SNick Piggin unsigned long free, dirty;
1937d76f9954SBaoquan He DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
19387d61bfe8SRoman Pen unsigned long dirty_min, dirty_max; /*< dirty range */
1939db64fe02SNick Piggin struct list_head free_list;
1940db64fe02SNick Piggin struct rcu_head rcu_head;
194102b709dfSNick Piggin struct list_head purge;
194288e0ad40SZhaoyang Huang unsigned int cpu;
1943db64fe02SNick Piggin };
1944db64fe02SNick Piggin
1945db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1946db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1947db64fe02SNick Piggin
1948db64fe02SNick Piggin /*
1949062eacf5SUladzislau Rezki (Sony) * To allow fast access to any "vmap_block" associated with a
1950062eacf5SUladzislau Rezki (Sony) * specific address, we use a hash.
1951062eacf5SUladzislau Rezki (Sony) *
1952062eacf5SUladzislau Rezki (Sony) * A per-cpu vmap_block_queue is used in two ways: it serializes
1953062eacf5SUladzislau Rezki (Sony) * access to the free block chains among CPUs (alloc path) and it
1954062eacf5SUladzislau Rezki (Sony) * also acts as a vmap_block hash (alloc/free paths). That is, the
1955062eacf5SUladzislau Rezki (Sony) * already existing per-cpu array is overloaded and doubles as a
1956062eacf5SUladzislau Rezki (Sony) * hash table. When it is used as a hash, the 'cpu' passed to
1957062eacf5SUladzislau Rezki (Sony) * per_cpu() is not actually a CPU but rather a hash index.
1958062eacf5SUladzislau Rezki (Sony) *
1959fa1c77c1SUladzislau Rezki (Sony) * The hash function is addr_to_vb_xa(), which hashes an address
1960062eacf5SUladzislau Rezki (Sony) * to the index (in the hash) it belongs to. The per_cpu() macro
1961062eacf5SUladzislau Rezki (Sony) * is then used with the generated index to access the array.
1962062eacf5SUladzislau Rezki (Sony) *
1963062eacf5SUladzislau Rezki (Sony) * An example:
1964062eacf5SUladzislau Rezki (Sony) *
1965062eacf5SUladzislau Rezki (Sony) * CPU_1 CPU_2 CPU_0
1966062eacf5SUladzislau Rezki (Sony) * | | |
1967062eacf5SUladzislau Rezki (Sony) * V V V
1968062eacf5SUladzislau Rezki (Sony) * 0 10 20 30 40 50 60
1969062eacf5SUladzislau Rezki (Sony) * |------|------|------|------|------|------|...<vmap address space>
1970062eacf5SUladzislau Rezki (Sony) * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2
1971062eacf5SUladzislau Rezki (Sony) *
1972062eacf5SUladzislau Rezki (Sony) * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus
1973062eacf5SUladzislau Rezki (Sony) * it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
1974062eacf5SUladzislau Rezki (Sony) *
1975062eacf5SUladzislau Rezki (Sony) * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus
1976062eacf5SUladzislau Rezki (Sony) * it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
1977062eacf5SUladzislau Rezki (Sony) *
1978062eacf5SUladzislau Rezki (Sony) * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus
1979062eacf5SUladzislau Rezki (Sony) * it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
1980062eacf5SUladzislau Rezki (Sony) *
1981062eacf5SUladzislau Rezki (Sony) * This technique almost always avoids lock contention on insert/remove;
1982062eacf5SUladzislau Rezki (Sony) * the xarray spinlocks protect against any contention that remains.
1983db64fe02SNick Piggin */
1984062eacf5SUladzislau Rezki (Sony) static struct xarray *
addr_to_vb_xa(unsigned long addr)1985fa1c77c1SUladzislau Rezki (Sony) addr_to_vb_xa(unsigned long addr)
1986062eacf5SUladzislau Rezki (Sony) {
198728acd531SUladzislau Rezki (Sony) int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
198828acd531SUladzislau Rezki (Sony)
198928acd531SUladzislau Rezki (Sony) /*
199028acd531SUladzislau Rezki (Sony) * Please note, nr_cpu_ids - 1 is the highest set bit in
199128acd531SUladzislau Rezki (Sony) * cpu_possible_mask, i.e. cpumask_next() is never invoked
199228acd531SUladzislau Rezki (Sony) * for an index that is already nr_cpu_ids - 1.
199328acd531SUladzislau Rezki (Sony) */
199428acd531SUladzislau Rezki (Sony) if (!cpu_possible(index))
199528acd531SUladzislau Rezki (Sony) index = cpumask_next(index, cpu_possible_mask);
1996062eacf5SUladzislau Rezki (Sony)
1997062eacf5SUladzislau Rezki (Sony) return &per_cpu(vmap_block_queue, index).vmap_blocks;
1998062eacf5SUladzislau Rezki (Sony) }
1999db64fe02SNick Piggin
2000db64fe02SNick Piggin /*
2001db64fe02SNick Piggin * We should probably have a fallback mechanism to allocate virtual memory
2002db64fe02SNick Piggin * out of partially filled vmap blocks. However vmap block sizing should be
2003db64fe02SNick Piggin * fairly reasonable according to the vmalloc size, so it shouldn't be a
2004db64fe02SNick Piggin * big problem.
2005db64fe02SNick Piggin */
2006db64fe02SNick Piggin
addr_to_vb_idx(unsigned long addr)2007db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr)
2008db64fe02SNick Piggin {
2009db64fe02SNick Piggin addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2010db64fe02SNick Piggin addr /= VMAP_BLOCK_SIZE;
2011db64fe02SNick Piggin return addr;
2012db64fe02SNick Piggin }
2013db64fe02SNick Piggin
vmap_block_vaddr(unsigned long va_start,unsigned long pages_off)2014cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2015cf725ce2SRoman Pen {
2016cf725ce2SRoman Pen unsigned long addr;
2017cf725ce2SRoman Pen
2018cf725ce2SRoman Pen addr = va_start + (pages_off << PAGE_SHIFT);
2019cf725ce2SRoman Pen BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2020cf725ce2SRoman Pen return (void *)addr;
2021cf725ce2SRoman Pen }
2022cf725ce2SRoman Pen
2023cf725ce2SRoman Pen /**
2024cf725ce2SRoman Pen * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in this
2025cf725ce2SRoman Pen * block. The number of pages obviously cannot exceed VMAP_BBMAP_BITS.
2026cf725ce2SRoman Pen * @order: 2^order pages are occupied in the newly allocated block
2027cf725ce2SRoman Pen * @gfp_mask: flags for the page level allocator
2028cf725ce2SRoman Pen *
2029a862f68aSMike Rapoport * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2030cf725ce2SRoman Pen */
new_vmap_block(unsigned int order,gfp_t gfp_mask)2031cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2032db64fe02SNick Piggin {
2033db64fe02SNick Piggin struct vmap_block_queue *vbq;
2034db64fe02SNick Piggin struct vmap_block *vb;
2035db64fe02SNick Piggin struct vmap_area *va;
2036062eacf5SUladzislau Rezki (Sony) struct xarray *xa;
2037db64fe02SNick Piggin unsigned long vb_idx;
2038db64fe02SNick Piggin int node, err;
2039cf725ce2SRoman Pen void *vaddr;
2040db64fe02SNick Piggin
2041db64fe02SNick Piggin node = numa_node_id();
2042db64fe02SNick Piggin
2043db64fe02SNick Piggin vb = kmalloc_node(sizeof(struct vmap_block),
2044db64fe02SNick Piggin gfp_mask & GFP_RECLAIM_MASK, node);
2045db64fe02SNick Piggin if (unlikely(!vb))
2046db64fe02SNick Piggin return ERR_PTR(-ENOMEM);
2047db64fe02SNick Piggin
2048db64fe02SNick Piggin va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2049db64fe02SNick Piggin VMALLOC_START, VMALLOC_END,
2050869176a0SBaoquan He node, gfp_mask,
2051869176a0SBaoquan He VMAP_RAM|VMAP_BLOCK);
2052ddf9c6d4STobias Klauser if (IS_ERR(va)) {
2053db64fe02SNick Piggin kfree(vb);
2054e7d86340SJulia Lawall return ERR_CAST(va);
2055db64fe02SNick Piggin }
2056db64fe02SNick Piggin
2057cf725ce2SRoman Pen vaddr = vmap_block_vaddr(va->va_start, 0);
2058db64fe02SNick Piggin spin_lock_init(&vb->lock);
2059db64fe02SNick Piggin vb->va = va;
2060cf725ce2SRoman Pen /* At least something should be left free */
2061cf725ce2SRoman Pen BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2062d76f9954SBaoquan He bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2063cf725ce2SRoman Pen vb->free = VMAP_BBMAP_BITS - (1UL << order);
2064db64fe02SNick Piggin vb->dirty = 0;
20657d61bfe8SRoman Pen vb->dirty_min = VMAP_BBMAP_BITS;
20667d61bfe8SRoman Pen vb->dirty_max = 0;
2067d76f9954SBaoquan He bitmap_set(vb->used_map, 0, (1UL << order));
2068db64fe02SNick Piggin INIT_LIST_HEAD(&vb->free_list);
20691b2770e2SWill Deacon vb->cpu = raw_smp_processor_id();
2070db64fe02SNick Piggin
2071fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(va->va_start);
2072db64fe02SNick Piggin vb_idx = addr_to_vb_idx(va->va_start);
2073062eacf5SUladzislau Rezki (Sony) err = xa_insert(xa, vb_idx, vb, gfp_mask);
20740f14599cSMatthew Wilcox (Oracle) if (err) {
20750f14599cSMatthew Wilcox (Oracle) kfree(vb);
20760f14599cSMatthew Wilcox (Oracle) free_vmap_area(va);
20770f14599cSMatthew Wilcox (Oracle) return ERR_PTR(err);
20780f14599cSMatthew Wilcox (Oracle) }
207988e0ad40SZhaoyang Huang /*
208088e0ad40SZhaoyang Huang * The list_add_tail_rcu() below may run on another core
208188e0ad40SZhaoyang Huang * than vb->cpu due to task migration. This is safe, as
208288e0ad40SZhaoyang Huang * list_add_tail_rcu() ensures the list's integrity
208388e0ad40SZhaoyang Huang * together with list_for_each_entry_rcu() on the read
208488e0ad40SZhaoyang Huang * side.
208588e0ad40SZhaoyang Huang */
208688e0ad40SZhaoyang Huang vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
2087db64fe02SNick Piggin spin_lock(&vbq->lock);
208868ac546fSRoman Pen list_add_tail_rcu(&vb->free_list, &vbq->free);
2089db64fe02SNick Piggin spin_unlock(&vbq->lock);
2090db64fe02SNick Piggin
2091cf725ce2SRoman Pen return vaddr;
2092db64fe02SNick Piggin }
2093db64fe02SNick Piggin
free_vmap_block(struct vmap_block * vb)2094db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb)
2095db64fe02SNick Piggin {
2096db64fe02SNick Piggin struct vmap_block *tmp;
2097062eacf5SUladzislau Rezki (Sony) struct xarray *xa;
2098db64fe02SNick Piggin
2099fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(vb->va->va_start);
2100062eacf5SUladzislau Rezki (Sony) tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2101db64fe02SNick Piggin BUG_ON(tmp != vb);
2102db64fe02SNick Piggin
2103edd89818SUladzislau Rezki (Sony) spin_lock(&vmap_area_lock);
2104edd89818SUladzislau Rezki (Sony) unlink_va(vb->va, &vmap_area_root);
2105edd89818SUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock);
2106edd89818SUladzislau Rezki (Sony)
210764141da5SJeremy Fitzhardinge free_vmap_area_noflush(vb->va);
210822a3c7d1SLai Jiangshan kfree_rcu(vb, rcu_head);
2109db64fe02SNick Piggin }
2110db64fe02SNick Piggin
purge_fragmented_block(struct vmap_block * vb,struct list_head * purge_list,bool force_purge)2111ca5e46c3SThomas Gleixner static bool purge_fragmented_block(struct vmap_block *vb,
211288e0ad40SZhaoyang Huang struct list_head *purge_list, bool force_purge)
211302b709dfSNick Piggin {
211488e0ad40SZhaoyang Huang struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
211588e0ad40SZhaoyang Huang
2116ca5e46c3SThomas Gleixner if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2117ca5e46c3SThomas Gleixner vb->dirty == VMAP_BBMAP_BITS)
2118ca5e46c3SThomas Gleixner return false;
211902b709dfSNick Piggin
212077e50af0SThomas Gleixner /* Don't overeagerly purge usable blocks unless requested */
212177e50af0SThomas Gleixner if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
212277e50af0SThomas Gleixner return false;
212377e50af0SThomas Gleixner
2124ca5e46c3SThomas Gleixner /* prevent further allocs after releasing lock */
21257f48121eSThomas Gleixner WRITE_ONCE(vb->free, 0);
2126ca5e46c3SThomas Gleixner /* prevent purging it again */
21277f48121eSThomas Gleixner WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
21287d61bfe8SRoman Pen vb->dirty_min = 0;
21297d61bfe8SRoman Pen vb->dirty_max = VMAP_BBMAP_BITS;
213002b709dfSNick Piggin spin_lock(&vbq->lock);
213102b709dfSNick Piggin list_del_rcu(&vb->free_list);
213202b709dfSNick Piggin spin_unlock(&vbq->lock);
2133ca5e46c3SThomas Gleixner list_add_tail(&vb->purge, purge_list);
2134ca5e46c3SThomas Gleixner return true;
213502b709dfSNick Piggin }
213602b709dfSNick Piggin
free_purged_blocks(struct list_head * purge_list)2137ca5e46c3SThomas Gleixner static void free_purged_blocks(struct list_head *purge_list)
2138ca5e46c3SThomas Gleixner {
2139ca5e46c3SThomas Gleixner struct vmap_block *vb, *n_vb;
2140ca5e46c3SThomas Gleixner
2141ca5e46c3SThomas Gleixner list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
214202b709dfSNick Piggin list_del(&vb->purge);
214302b709dfSNick Piggin free_vmap_block(vb);
214402b709dfSNick Piggin }
214502b709dfSNick Piggin }
214602b709dfSNick Piggin
purge_fragmented_blocks(int cpu)2147ca5e46c3SThomas Gleixner static void purge_fragmented_blocks(int cpu)
2148ca5e46c3SThomas Gleixner {
2149ca5e46c3SThomas Gleixner LIST_HEAD(purge);
2150ca5e46c3SThomas Gleixner struct vmap_block *vb;
2151ca5e46c3SThomas Gleixner struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2152ca5e46c3SThomas Gleixner
2153ca5e46c3SThomas Gleixner rcu_read_lock();
2154ca5e46c3SThomas Gleixner list_for_each_entry_rcu(vb, &vbq->free, free_list) {
21557f48121eSThomas Gleixner unsigned long free = READ_ONCE(vb->free);
21567f48121eSThomas Gleixner unsigned long dirty = READ_ONCE(vb->dirty);
21577f48121eSThomas Gleixner
21587f48121eSThomas Gleixner if (free + dirty != VMAP_BBMAP_BITS ||
21597f48121eSThomas Gleixner dirty == VMAP_BBMAP_BITS)
2160ca5e46c3SThomas Gleixner continue;
2161ca5e46c3SThomas Gleixner
2162ca5e46c3SThomas Gleixner spin_lock(&vb->lock);
216388e0ad40SZhaoyang Huang purge_fragmented_block(vb, &purge, true);
2164ca5e46c3SThomas Gleixner spin_unlock(&vb->lock);
2165ca5e46c3SThomas Gleixner }
2166ca5e46c3SThomas Gleixner rcu_read_unlock();
2167ca5e46c3SThomas Gleixner free_purged_blocks(&purge);
2168ca5e46c3SThomas Gleixner }
2169ca5e46c3SThomas Gleixner
purge_fragmented_blocks_allcpus(void)217002b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void)
217102b709dfSNick Piggin {
217202b709dfSNick Piggin int cpu;
217302b709dfSNick Piggin
217402b709dfSNick Piggin for_each_possible_cpu(cpu)
217502b709dfSNick Piggin purge_fragmented_blocks(cpu);
217602b709dfSNick Piggin }
217702b709dfSNick Piggin
vb_alloc(unsigned long size,gfp_t gfp_mask)2178db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2179db64fe02SNick Piggin {
2180db64fe02SNick Piggin struct vmap_block_queue *vbq;
2181db64fe02SNick Piggin struct vmap_block *vb;
2182cf725ce2SRoman Pen void *vaddr = NULL;
2183db64fe02SNick Piggin unsigned int order;
2184db64fe02SNick Piggin
2185891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size));
2186db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2187aa91c4d8SJan Kara if (WARN_ON(size == 0)) {
2188aa91c4d8SJan Kara /*
2189aa91c4d8SJan Kara * Allocating 0 bytes isn't what the caller wants, and
2190aa91c4d8SJan Kara * get_order(0) returns a bogus result. Just warn and
2191aa91c4d8SJan Kara * terminate early.
2192aa91c4d8SJan Kara */
2193aa91c4d8SJan Kara return NULL;
2194aa91c4d8SJan Kara }
2195db64fe02SNick Piggin order = get_order(size);
2196db64fe02SNick Piggin
2197db64fe02SNick Piggin rcu_read_lock();
21983f804920SSebastian Andrzej Siewior vbq = raw_cpu_ptr(&vmap_block_queue);
2199db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2200cf725ce2SRoman Pen unsigned long pages_off;
2201db64fe02SNick Piggin
220243d76502SThomas Gleixner if (READ_ONCE(vb->free) < (1UL << order))
220343d76502SThomas Gleixner continue;
220443d76502SThomas Gleixner
2205db64fe02SNick Piggin spin_lock(&vb->lock);
2206cf725ce2SRoman Pen if (vb->free < (1UL << order)) {
2207cf725ce2SRoman Pen spin_unlock(&vb->lock);
2208cf725ce2SRoman Pen continue;
2209cf725ce2SRoman Pen }
221002b709dfSNick Piggin
2211cf725ce2SRoman Pen pages_off = VMAP_BBMAP_BITS - vb->free;
2212cf725ce2SRoman Pen vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
221343d76502SThomas Gleixner WRITE_ONCE(vb->free, vb->free - (1UL << order));
2214d76f9954SBaoquan He bitmap_set(vb->used_map, pages_off, (1UL << order));
2215db64fe02SNick Piggin if (vb->free == 0) {
2216db64fe02SNick Piggin spin_lock(&vbq->lock);
2217de560423SNick Piggin list_del_rcu(&vb->free_list);
2218db64fe02SNick Piggin spin_unlock(&vbq->lock);
2219db64fe02SNick Piggin }
2220cf725ce2SRoman Pen
2221db64fe02SNick Piggin spin_unlock(&vb->lock);
2222db64fe02SNick Piggin break;
2223db64fe02SNick Piggin }
222402b709dfSNick Piggin
2225db64fe02SNick Piggin rcu_read_unlock();
2226db64fe02SNick Piggin
2227cf725ce2SRoman Pen /* Allocate new block if nothing was found */
2228cf725ce2SRoman Pen if (!vaddr)
2229cf725ce2SRoman Pen vaddr = new_vmap_block(order, gfp_mask);
2230db64fe02SNick Piggin
2231cf725ce2SRoman Pen return vaddr;
2232db64fe02SNick Piggin }
2233db64fe02SNick Piggin
vb_free(unsigned long addr,unsigned long size)223478a0e8c4SChristoph Hellwig static void vb_free(unsigned long addr, unsigned long size)
2235db64fe02SNick Piggin {
2236db64fe02SNick Piggin unsigned long offset;
2237db64fe02SNick Piggin unsigned int order;
2238db64fe02SNick Piggin struct vmap_block *vb;
2239062eacf5SUladzislau Rezki (Sony) struct xarray *xa;
2240db64fe02SNick Piggin
2241891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size));
2242db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2243b29acbdcSNick Piggin
224478a0e8c4SChristoph Hellwig flush_cache_vunmap(addr, addr + size);
2245b29acbdcSNick Piggin
2246db64fe02SNick Piggin order = get_order(size);
224778a0e8c4SChristoph Hellwig offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2248062eacf5SUladzislau Rezki (Sony)
2249fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(addr);
2250062eacf5SUladzislau Rezki (Sony) vb = xa_load(xa, addr_to_vb_idx(addr));
2251062eacf5SUladzislau Rezki (Sony)
2252d76f9954SBaoquan He spin_lock(&vb->lock);
2253d76f9954SBaoquan He bitmap_clear(vb->used_map, offset, (1UL << order));
2254d76f9954SBaoquan He spin_unlock(&vb->lock);
2255db64fe02SNick Piggin
22564ad0ae8cSNicholas Piggin vunmap_range_noflush(addr, addr + size);
225764141da5SJeremy Fitzhardinge
22588e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static())
225978a0e8c4SChristoph Hellwig flush_tlb_kernel_range(addr, addr + size);
226082a2e924SChintan Pandya
2261db64fe02SNick Piggin spin_lock(&vb->lock);
22627d61bfe8SRoman Pen
2263a09fad96SThomas Gleixner /* Expand the not yet TLB flushed dirty range */
22647d61bfe8SRoman Pen vb->dirty_min = min(vb->dirty_min, offset);
22657d61bfe8SRoman Pen vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2266d086817dSMinChan Kim
22677f48121eSThomas Gleixner WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2268db64fe02SNick Piggin if (vb->dirty == VMAP_BBMAP_BITS) {
2269de560423SNick Piggin BUG_ON(vb->free);
2270db64fe02SNick Piggin spin_unlock(&vb->lock);
2271db64fe02SNick Piggin free_vmap_block(vb);
2272db64fe02SNick Piggin } else
2273db64fe02SNick Piggin spin_unlock(&vb->lock);
2274db64fe02SNick Piggin }
2275db64fe02SNick Piggin
_vm_unmap_aliases(unsigned long start,unsigned long end,int flush)2276868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2277db64fe02SNick Piggin {
2278ca5e46c3SThomas Gleixner LIST_HEAD(purge_list);
2279db64fe02SNick Piggin int cpu;
2280db64fe02SNick Piggin
22819b463334SJeremy Fitzhardinge if (unlikely(!vmap_initialized))
22829b463334SJeremy Fitzhardinge return;
22839b463334SJeremy Fitzhardinge
2284ca5e46c3SThomas Gleixner mutex_lock(&vmap_purge_lock);
22855803ed29SChristoph Hellwig
2286db64fe02SNick Piggin for_each_possible_cpu(cpu) {
2287db64fe02SNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2288db64fe02SNick Piggin struct vmap_block *vb;
2289fc1e0d98SThomas Gleixner unsigned long idx;
2290db64fe02SNick Piggin
2291db64fe02SNick Piggin rcu_read_lock();
2292fc1e0d98SThomas Gleixner xa_for_each(&vbq->vmap_blocks, idx, vb) {
2293db64fe02SNick Piggin spin_lock(&vb->lock);
2294ca5e46c3SThomas Gleixner
2295ca5e46c3SThomas Gleixner /*
2296ca5e46c3SThomas Gleixner * Try to purge a fragmented block first. If it's
2297ca5e46c3SThomas Gleixner * not purgeable, check whether there is dirty
2298ca5e46c3SThomas Gleixner * space to be flushed.
2299ca5e46c3SThomas Gleixner */
230088e0ad40SZhaoyang Huang if (!purge_fragmented_block(vb, &purge_list, false) &&
2301a09fad96SThomas Gleixner vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
23027d61bfe8SRoman Pen unsigned long va_start = vb->va->va_start;
2303db64fe02SNick Piggin unsigned long s, e;
2304b136be5eSJoonsoo Kim
23057d61bfe8SRoman Pen s = va_start + (vb->dirty_min << PAGE_SHIFT);
23067d61bfe8SRoman Pen e = va_start + (vb->dirty_max << PAGE_SHIFT);
2307db64fe02SNick Piggin
23087d61bfe8SRoman Pen start = min(s, start);
23097d61bfe8SRoman Pen end = max(e, end);
23107d61bfe8SRoman Pen
2311a09fad96SThomas Gleixner /* Prevent that this is flushed again */
2312a09fad96SThomas Gleixner vb->dirty_min = VMAP_BBMAP_BITS;
2313a09fad96SThomas Gleixner vb->dirty_max = 0;
2314a09fad96SThomas Gleixner
2315db64fe02SNick Piggin flush = 1;
2316db64fe02SNick Piggin }
2317db64fe02SNick Piggin spin_unlock(&vb->lock);
2318db64fe02SNick Piggin }
2319db64fe02SNick Piggin rcu_read_unlock();
2320db64fe02SNick Piggin }
2321ca5e46c3SThomas Gleixner free_purged_blocks(&purge_list);
2322db64fe02SNick Piggin
23230574ecd1SChristoph Hellwig if (!__purge_vmap_area_lazy(start, end) && flush)
23240574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end);
2325f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock);
2326db64fe02SNick Piggin }
2327868b104dSRick Edgecombe
2328868b104dSRick Edgecombe /**
2329868b104dSRick Edgecombe * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2330868b104dSRick Edgecombe *
2331868b104dSRick Edgecombe * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2332868b104dSRick Edgecombe * to amortize TLB flushing overheads. What this means is that any page you
2333868b104dSRick Edgecombe * have now may, in a former life, have been mapped into a kernel virtual
2334868b104dSRick Edgecombe * address by the vmap layer, and so there might be some CPUs with TLB entries
2335868b104dSRick Edgecombe * still referencing that page (in addition to the regular 1:1 kernel mapping).
2336868b104dSRick Edgecombe *
2337868b104dSRick Edgecombe * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2338868b104dSRick Edgecombe * be sure that none of the pages we have control over will have any aliases
2339868b104dSRick Edgecombe * from the vmap layer.
2340868b104dSRick Edgecombe */
vm_unmap_aliases(void)2341868b104dSRick Edgecombe void vm_unmap_aliases(void)
2342868b104dSRick Edgecombe {
2343868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0;
2344868b104dSRick Edgecombe int flush = 0;
2345868b104dSRick Edgecombe
2346868b104dSRick Edgecombe _vm_unmap_aliases(start, end, flush);
2347868b104dSRick Edgecombe }
2348db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2349db64fe02SNick Piggin
2350db64fe02SNick Piggin /**
2351db64fe02SNick Piggin * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2352db64fe02SNick Piggin * @mem: the pointer returned by vm_map_ram
2353db64fe02SNick Piggin * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2354db64fe02SNick Piggin */
vm_unmap_ram(const void * mem,unsigned int count)2355db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count)
2356db64fe02SNick Piggin {
235765ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT;
23584aff1dc4SAndrey Konovalov unsigned long addr = (unsigned long)kasan_reset_tag(mem);
23599c3acf60SChristoph Hellwig struct vmap_area *va;
2360db64fe02SNick Piggin
23615803ed29SChristoph Hellwig might_sleep();
2362db64fe02SNick Piggin BUG_ON(!addr);
2363db64fe02SNick Piggin BUG_ON(addr < VMALLOC_START);
2364db64fe02SNick Piggin BUG_ON(addr > VMALLOC_END);
2365a1c0b1a0SShawn Lin BUG_ON(!PAGE_ALIGNED(addr));
2366db64fe02SNick Piggin
2367d98c9e83SAndrey Ryabinin kasan_poison_vmalloc(mem, size);
2368d98c9e83SAndrey Ryabinin
23699c3acf60SChristoph Hellwig if (likely(count <= VMAP_MAX_ALLOC)) {
237005e3ff95SChintan Pandya debug_check_no_locks_freed(mem, size);
237178a0e8c4SChristoph Hellwig vb_free(addr, size);
23729c3acf60SChristoph Hellwig return;
23739c3acf60SChristoph Hellwig }
23749c3acf60SChristoph Hellwig
2375edd89818SUladzislau Rezki (Sony) va = find_unlink_vmap_area(addr);
237614687619SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!va))
237714687619SUladzislau Rezki (Sony) return;
237814687619SUladzislau Rezki (Sony)
237905e3ff95SChintan Pandya debug_check_no_locks_freed((void *)va->va_start,
238005e3ff95SChintan Pandya (va->va_end - va->va_start));
23819c3acf60SChristoph Hellwig free_unmap_vmap_area(va);
2382db64fe02SNick Piggin }
2383db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram);
2384db64fe02SNick Piggin
2385db64fe02SNick Piggin /**
2386db64fe02SNick Piggin * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2387db64fe02SNick Piggin * @pages: an array of pointers to the pages to be mapped
2388db64fe02SNick Piggin * @count: number of pages
2389db64fe02SNick Piggin * @node: prefer to allocate data structures on this node
2390e99c97adSRandy Dunlap *
239136437638SGioh Kim * For fewer than VMAP_MAX_ALLOC pages this function can be faster than
239236437638SGioh Kim * vmap(), so it is a good choice there. But if you mix long-lived and
239336437638SGioh Kim * short-lived objects with vm_map_ram(), it can consume a lot of address
239436437638SGioh Kim * space through fragmentation (especially on a 32-bit machine) and you may
239536437638SGioh Kim * eventually see failures. Please use this function for short-lived objects.
239636437638SGioh Kim *
2397e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure
2398db64fe02SNick Piggin */
vm_map_ram(struct page ** pages,unsigned int count,int node)2399d4efd79aSChristoph Hellwig void *vm_map_ram(struct page **pages, unsigned int count, int node)
2400db64fe02SNick Piggin {
240165ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT;
2402db64fe02SNick Piggin unsigned long addr;
2403db64fe02SNick Piggin void *mem;
2404db64fe02SNick Piggin
2405db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) {
2406db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL);
2407db64fe02SNick Piggin if (IS_ERR(mem))
2408db64fe02SNick Piggin return NULL;
2409db64fe02SNick Piggin addr = (unsigned long)mem;
2410db64fe02SNick Piggin } else {
2411db64fe02SNick Piggin struct vmap_area *va;
2412db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE,
2413869176a0SBaoquan He VMALLOC_START, VMALLOC_END,
2414869176a0SBaoquan He node, GFP_KERNEL, VMAP_RAM);
2415db64fe02SNick Piggin if (IS_ERR(va))
2416db64fe02SNick Piggin return NULL;
2417db64fe02SNick Piggin
2418db64fe02SNick Piggin addr = va->va_start;
2419db64fe02SNick Piggin mem = (void *)addr;
2420db64fe02SNick Piggin }
2421d98c9e83SAndrey Ryabinin
2422b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2423b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) {
2424db64fe02SNick Piggin vm_unmap_ram(mem, count);
2425db64fe02SNick Piggin return NULL;
2426db64fe02SNick Piggin }
2427b67177ecSNicholas Piggin
242823689e91SAndrey Konovalov /*
242923689e91SAndrey Konovalov * Mark the pages as accessible, now that they are mapped.
243023689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for
243123689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
243223689e91SAndrey Konovalov */
2433f6e39794SAndrey Konovalov mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
243419f1c3acSAndrey Konovalov
2435db64fe02SNick Piggin return mem;
2436db64fe02SNick Piggin }
2437db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram);
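
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): map a few pages for a short-lived, virtually contiguous view and
 * tear the mapping down again. The helper name and page count are made up
 * for the example; error handling is kept minimal.
 */
static void __maybe_unused example_vm_map_ram_usage(void)
{
        struct page *pages[4];
        void *va;
        int i;

        for (i = 0; i < ARRAY_SIZE(pages); i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto out_free;
        }

        /* Well below VMAP_MAX_ALLOC pages, so the per-cpu block path is used. */
        va = vm_map_ram(pages, ARRAY_SIZE(pages), NUMA_NO_NODE);
        if (va) {
                memset(va, 0, ARRAY_SIZE(pages) * PAGE_SIZE);
                /* ... short-lived use of the linear mapping ... */
                vm_unmap_ram(va, ARRAY_SIZE(pages));
        }

out_free:
        while (i--)
                __free_page(pages[i]);
}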
2438db64fe02SNick Piggin
24394341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata;
244092eac168SMike Rapoport
vm_area_page_order(struct vm_struct * vm)2441121e6f32SNicholas Piggin static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2442121e6f32SNicholas Piggin {
2443121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2444121e6f32SNicholas Piggin return vm->page_order;
2445121e6f32SNicholas Piggin #else
2446121e6f32SNicholas Piggin return 0;
2447121e6f32SNicholas Piggin #endif
2448121e6f32SNicholas Piggin }
2449121e6f32SNicholas Piggin
set_vm_area_page_order(struct vm_struct * vm,unsigned int order)2450121e6f32SNicholas Piggin static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2451121e6f32SNicholas Piggin {
2452121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2453121e6f32SNicholas Piggin vm->page_order = order;
2454121e6f32SNicholas Piggin #else
2455121e6f32SNicholas Piggin BUG_ON(order != 0);
2456121e6f32SNicholas Piggin #endif
2457121e6f32SNicholas Piggin }
2458121e6f32SNicholas Piggin
2459f0aa6617STejun Heo /**
2460be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot
2461be9b7335SNicolas Pitre * @vm: vm_struct to add
2462be9b7335SNicolas Pitre *
2463be9b7335SNicolas Pitre * This function is used to add a fixed kernel vm area to vmlist before
2464be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
2465be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero.
2466be9b7335SNicolas Pitre *
2467be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2468be9b7335SNicolas Pitre */
vm_area_add_early(struct vm_struct * vm)2469be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm)
2470be9b7335SNicolas Pitre {
2471be9b7335SNicolas Pitre struct vm_struct *tmp, **p;
2472be9b7335SNicolas Pitre
2473be9b7335SNicolas Pitre BUG_ON(vmap_initialized);
2474be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2475be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) {
2476be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size);
2477be9b7335SNicolas Pitre break;
2478be9b7335SNicolas Pitre } else
2479be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr);
2480be9b7335SNicolas Pitre }
2481be9b7335SNicolas Pitre vm->next = *p;
2482be9b7335SNicolas Pitre *p = vm;
2483be9b7335SNicolas Pitre }
2484be9b7335SNicolas Pitre
2485be9b7335SNicolas Pitre /**
2486f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot
2487f0aa6617STejun Heo * @vm: vm_struct to register
2488c0c0a293STejun Heo * @align: requested alignment
2489f0aa6617STejun Heo *
2490f0aa6617STejun Heo * This function is used to register kernel vm area before
2491f0aa6617STejun Heo * vmalloc_init() is called. @vm->size and @vm->flags should contain
2492f0aa6617STejun Heo * proper values on entry and other fields should be zero. On return,
2493f0aa6617STejun Heo * vm->addr contains the allocated address.
2494f0aa6617STejun Heo *
2495f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2496f0aa6617STejun Heo */
vm_area_register_early(struct vm_struct * vm,size_t align)2497c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2498f0aa6617STejun Heo {
24990eb68437SKefeng Wang unsigned long addr = ALIGN(VMALLOC_START, align);
25000eb68437SKefeng Wang struct vm_struct *cur, **p;
2501f0aa6617STejun Heo
25020eb68437SKefeng Wang BUG_ON(vmap_initialized);
2503c0c0a293STejun Heo
25040eb68437SKefeng Wang for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
25050eb68437SKefeng Wang if ((unsigned long)cur->addr - addr >= vm->size)
25060eb68437SKefeng Wang break;
25070eb68437SKefeng Wang addr = ALIGN((unsigned long)cur->addr + cur->size, align);
25080eb68437SKefeng Wang }
25090eb68437SKefeng Wang
25100eb68437SKefeng Wang BUG_ON(addr > VMALLOC_END - vm->size);
2511c0c0a293STejun Heo vm->addr = (void *)addr;
25120eb68437SKefeng Wang vm->next = *p;
25130eb68437SKefeng Wang *p = vm;
25143252b1d8SKefeng Wang kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
2515f0aa6617STejun Heo }
2516f0aa6617STejun Heo
vmap_init_free_space(void)251768ad4a33SUladzislau Rezki (Sony) static void vmap_init_free_space(void)
251868ad4a33SUladzislau Rezki (Sony) {
251968ad4a33SUladzislau Rezki (Sony) unsigned long vmap_start = 1;
252068ad4a33SUladzislau Rezki (Sony) const unsigned long vmap_end = ULONG_MAX;
252168ad4a33SUladzislau Rezki (Sony) struct vmap_area *busy, *free;
252268ad4a33SUladzislau Rezki (Sony)
252368ad4a33SUladzislau Rezki (Sony) /*
252468ad4a33SUladzislau Rezki (Sony) * B F B B B F
252568ad4a33SUladzislau Rezki (Sony) * -|-----|.....|-----|-----|-----|.....|-
252668ad4a33SUladzislau Rezki (Sony) * | The KVA space |
252768ad4a33SUladzislau Rezki (Sony) * |<--------------------------------->|
252868ad4a33SUladzislau Rezki (Sony) */
252968ad4a33SUladzislau Rezki (Sony) list_for_each_entry(busy, &vmap_area_list, list) {
253068ad4a33SUladzislau Rezki (Sony) if (busy->va_start - vmap_start > 0) {
253168ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
253268ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) {
253368ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start;
253468ad4a33SUladzislau Rezki (Sony) free->va_end = busy->va_start;
253568ad4a33SUladzislau Rezki (Sony)
253668ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL,
253768ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root,
253868ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list);
253968ad4a33SUladzislau Rezki (Sony) }
254068ad4a33SUladzislau Rezki (Sony) }
254168ad4a33SUladzislau Rezki (Sony)
254268ad4a33SUladzislau Rezki (Sony) vmap_start = busy->va_end;
254368ad4a33SUladzislau Rezki (Sony) }
254468ad4a33SUladzislau Rezki (Sony)
254568ad4a33SUladzislau Rezki (Sony) if (vmap_end - vmap_start > 0) {
254668ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
254768ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) {
254868ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start;
254968ad4a33SUladzislau Rezki (Sony) free->va_end = vmap_end;
255068ad4a33SUladzislau Rezki (Sony)
255168ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL,
255268ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root,
255368ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list);
255468ad4a33SUladzislau Rezki (Sony) }
255568ad4a33SUladzislau Rezki (Sony) }
255668ad4a33SUladzislau Rezki (Sony) }
255768ad4a33SUladzislau Rezki (Sony)
setup_vmalloc_vm_locked(struct vm_struct * vm,struct vmap_area * va,unsigned long flags,const void * caller)2558e36176beSUladzislau Rezki (Sony) static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2559e36176beSUladzislau Rezki (Sony) struct vmap_area *va, unsigned long flags, const void *caller)
2560cf88c790STejun Heo {
2561cf88c790STejun Heo vm->flags = flags;
2562cf88c790STejun Heo vm->addr = (void *)va->va_start;
2563cf88c790STejun Heo vm->size = va->va_end - va->va_start;
2564cf88c790STejun Heo vm->caller = caller;
2565db1aecafSMinchan Kim va->vm = vm;
2566e36176beSUladzislau Rezki (Sony) }
2567e36176beSUladzislau Rezki (Sony)
setup_vmalloc_vm(struct vm_struct * vm,struct vmap_area * va,unsigned long flags,const void * caller)2568e36176beSUladzislau Rezki (Sony) static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2569e36176beSUladzislau Rezki (Sony) unsigned long flags, const void *caller)
2570e36176beSUladzislau Rezki (Sony) {
2571e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock);
2572e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vm, va, flags, caller);
2573c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock);
2574f5252e00SMitsuo Hayasaka }
2575cf88c790STejun Heo
clear_vm_uninitialized_flag(struct vm_struct * vm)257620fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2577f5252e00SMitsuo Hayasaka {
2578d4033afdSJoonsoo Kim /*
257920fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED,
2580d4033afdSJoonsoo Kim * we should make sure that vm has proper values.
2581d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info().
2582d4033afdSJoonsoo Kim */
2583d4033afdSJoonsoo Kim smp_wmb();
258420fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED;
2585cf88c790STejun Heo }
2586cf88c790STejun Heo
__get_vm_area_node(unsigned long size,unsigned long align,unsigned long shift,unsigned long flags,unsigned long start,unsigned long end,int node,gfp_t gfp_mask,const void * caller)2587db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size,
25887ca3027bSDaniel Axtens unsigned long align, unsigned long shift, unsigned long flags,
25897ca3027bSDaniel Axtens unsigned long start, unsigned long end, int node,
25907ca3027bSDaniel Axtens gfp_t gfp_mask, const void *caller)
2591db64fe02SNick Piggin {
25920006526dSKautuk Consul struct vmap_area *va;
2593db64fe02SNick Piggin struct vm_struct *area;
2594d98c9e83SAndrey Ryabinin unsigned long requested_size = size;
25951da177e4SLinus Torvalds
259652fd24caSGiridhar Pemmasani BUG_ON(in_interrupt());
25977ca3027bSDaniel Axtens size = ALIGN(size, 1ul << shift);
259831be8309SOGAWA Hirofumi if (unlikely(!size))
259931be8309SOGAWA Hirofumi return NULL;
26001da177e4SLinus Torvalds
2601252e5c6eSzijun_hu if (flags & VM_IOREMAP)
2602252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size),
2603252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER);
2604252e5c6eSzijun_hu
2605cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
26061da177e4SLinus Torvalds if (unlikely(!area))
26071da177e4SLinus Torvalds return NULL;
26081da177e4SLinus Torvalds
260971394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD))
26101da177e4SLinus Torvalds size += PAGE_SIZE;
26111da177e4SLinus Torvalds
2612869176a0SBaoquan He va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
2613db64fe02SNick Piggin if (IS_ERR(va)) {
2614db64fe02SNick Piggin kfree(area);
2615db64fe02SNick Piggin return NULL;
26161da177e4SLinus Torvalds }
26171da177e4SLinus Torvalds
2618d98c9e83SAndrey Ryabinin setup_vmalloc_vm(area, va, flags, caller);
26193c5c3cfbSDaniel Axtens
262019f1c3acSAndrey Konovalov /*
262119f1c3acSAndrey Konovalov * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
262219f1c3acSAndrey Konovalov * best-effort approach, as they can be mapped outside of vmalloc code.
262319f1c3acSAndrey Konovalov * For VM_ALLOC mappings, the pages are marked as accessible after
262419f1c3acSAndrey Konovalov * getting mapped in __vmalloc_node_range().
262523689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for
262623689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
262719f1c3acSAndrey Konovalov */
262819f1c3acSAndrey Konovalov if (!(flags & VM_ALLOC))
262923689e91SAndrey Konovalov area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
2630f6e39794SAndrey Konovalov KASAN_VMALLOC_PROT_NORMAL);
26311d96320fSAndrey Konovalov
26321da177e4SLinus Torvalds return area;
26331da177e4SLinus Torvalds }
26341da177e4SLinus Torvalds
__get_vm_area_caller(unsigned long size,unsigned long flags,unsigned long start,unsigned long end,const void * caller)2635c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2636c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end,
26375e6cafc8SMarek Szyprowski const void *caller)
2638c2968612SBenjamin Herrenschmidt {
26397ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
26407ca3027bSDaniel Axtens NUMA_NO_NODE, GFP_KERNEL, caller);
2641c2968612SBenjamin Herrenschmidt }
2642c2968612SBenjamin Herrenschmidt
26431da177e4SLinus Torvalds /**
2644183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area
26451da177e4SLinus Torvalds * @size: size of the area
26461da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
26471da177e4SLinus Torvalds *
26481da177e4SLinus Torvalds * Search for an area of @size in the kernel virtual mapping area,
26491da177e4SLinus Torvalds * and reserve it for our purposes. Returns the area descriptor
26501da177e4SLinus Torvalds * on success or %NULL on failure.
2651a862f68aSMike Rapoport *
2652a862f68aSMike Rapoport * Return: the area descriptor on success or %NULL on failure.
26531da177e4SLinus Torvalds */
get_vm_area(unsigned long size,unsigned long flags)26541da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
26551da177e4SLinus Torvalds {
26567ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
26577ca3027bSDaniel Axtens VMALLOC_START, VMALLOC_END,
265800ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL,
265900ef2d2fSDavid Rientjes __builtin_return_address(0));
266023016969SChristoph Lameter }
266123016969SChristoph Lameter
get_vm_area_caller(unsigned long size,unsigned long flags,const void * caller)266223016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
26635e6cafc8SMarek Szyprowski const void *caller)
266423016969SChristoph Lameter {
26657ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
26667ca3027bSDaniel Axtens VMALLOC_START, VMALLOC_END,
266700ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, caller);
26681da177e4SLinus Torvalds }
26691da177e4SLinus Torvalds
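/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * reserve a chunk of kernel virtual address space without backing it with
 * pages, then release it again. The size is arbitrary; a real user would
 * map something into the area before touching it.
 */
static void __maybe_unused example_get_vm_area_usage(void)
{
        struct vm_struct *area;

        area = get_vm_area(4 * PAGE_SIZE, VM_IOREMAP);
        if (!area)
                return;

        /* [area->addr, area->addr + area->size) is now reserved KVA. */

        free_vm_area(area);
}
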
2670e9da6e99SMarek Szyprowski /**
2671e9da6e99SMarek Szyprowski * find_vm_area - find a contiguous kernel virtual area
2672e9da6e99SMarek Szyprowski * @addr: base address
2673e9da6e99SMarek Szyprowski *
2674e9da6e99SMarek Szyprowski * Search for the kernel VM area starting at @addr, and return it.
2675e9da6e99SMarek Szyprowski * It is up to the caller to do all required locking to keep the returned
2676e9da6e99SMarek Szyprowski * pointer valid.
2677a862f68aSMike Rapoport *
267874640617SHui Su * Return: the area descriptor on success or %NULL on failure.
2679e9da6e99SMarek Szyprowski */
find_vm_area(const void * addr)2680e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr)
268183342314SNick Piggin {
2682db64fe02SNick Piggin struct vmap_area *va;
268383342314SNick Piggin
2684db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr);
2685688fcbfcSPengfei Li if (!va)
26867856dfebSAndi Kleen return NULL;
2687688fcbfcSPengfei Li
2688688fcbfcSPengfei Li return va->vm;
26897856dfebSAndi Kleen }
26907856dfebSAndi Kleen
26911da177e4SLinus Torvalds /**
2692183ff22bSSimon Arlott * remove_vm_area - find and remove a contiguous kernel virtual area
26931da177e4SLinus Torvalds * @addr: base address
26941da177e4SLinus Torvalds *
26951da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it.
26961da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe
26977856dfebSAndi Kleen * on SMP machines, except for its size or flags.
2698a862f68aSMike Rapoport *
269974640617SHui Su * Return: the area descriptor on success or %NULL on failure.
27001da177e4SLinus Torvalds */
remove_vm_area(const void * addr)2701b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr)
27021da177e4SLinus Torvalds {
2703db64fe02SNick Piggin struct vmap_area *va;
270475c59ce7SChristoph Hellwig struct vm_struct *vm;
2705db64fe02SNick Piggin
27065803ed29SChristoph Hellwig might_sleep();
27075803ed29SChristoph Hellwig
270817d3ef43SChristoph Hellwig if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
270917d3ef43SChristoph Hellwig addr))
2710db64fe02SNick Piggin return NULL;
271117d3ef43SChristoph Hellwig
271275c59ce7SChristoph Hellwig va = find_unlink_vmap_area((unsigned long)addr);
271375c59ce7SChristoph Hellwig if (!va || !va->vm)
271475c59ce7SChristoph Hellwig return NULL;
271575c59ce7SChristoph Hellwig vm = va->vm;
271617d3ef43SChristoph Hellwig
271717d3ef43SChristoph Hellwig debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
271817d3ef43SChristoph Hellwig debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
271975c59ce7SChristoph Hellwig kasan_free_module_shadow(vm);
272017d3ef43SChristoph Hellwig kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
272117d3ef43SChristoph Hellwig
272275c59ce7SChristoph Hellwig free_unmap_vmap_area(va);
272375c59ce7SChristoph Hellwig return vm;
27241da177e4SLinus Torvalds }
27251da177e4SLinus Torvalds
set_area_direct_map(const struct vm_struct * area,int (* set_direct_map)(struct page * page))2726868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area,
2727868b104dSRick Edgecombe int (*set_direct_map)(struct page *page))
2728868b104dSRick Edgecombe {
2729868b104dSRick Edgecombe int i;
2730868b104dSRick Edgecombe
2731121e6f32SNicholas Piggin /* HUGE_VMALLOC passes small pages to set_direct_map */
2732868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++)
2733868b104dSRick Edgecombe if (page_address(area->pages[i]))
2734868b104dSRick Edgecombe set_direct_map(area->pages[i]);
2735868b104dSRick Edgecombe }
2736868b104dSRick Edgecombe
27379e5fa0aeSChristoph Hellwig /*
27389e5fa0aeSChristoph Hellwig * Flush the vm mapping and reset the direct map.
27399e5fa0aeSChristoph Hellwig */
vm_reset_perms(struct vm_struct * area)27409e5fa0aeSChristoph Hellwig static void vm_reset_perms(struct vm_struct *area)
2741868b104dSRick Edgecombe {
2742868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0;
2743121e6f32SNicholas Piggin unsigned int page_order = vm_area_page_order(area);
274431e67340SRick Edgecombe int flush_dmap = 0;
2745868b104dSRick Edgecombe int i;
2746868b104dSRick Edgecombe
2747868b104dSRick Edgecombe /*
27489e5fa0aeSChristoph Hellwig * Find the start and end range of the direct mappings to make sure that
2749868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map.
2750868b104dSRick Edgecombe */
2751121e6f32SNicholas Piggin for (i = 0; i < area->nr_pages; i += 1U << page_order) {
27528e41f872SRick Edgecombe unsigned long addr = (unsigned long)page_address(area->pages[i]);
27539e5fa0aeSChristoph Hellwig
27548e41f872SRick Edgecombe if (addr) {
2755121e6f32SNicholas Piggin unsigned long page_size;
2756121e6f32SNicholas Piggin
2757121e6f32SNicholas Piggin page_size = PAGE_SIZE << page_order;
2758868b104dSRick Edgecombe start = min(addr, start);
2759121e6f32SNicholas Piggin end = max(addr + page_size, end);
276031e67340SRick Edgecombe flush_dmap = 1;
2761868b104dSRick Edgecombe }
2762868b104dSRick Edgecombe }
2763868b104dSRick Edgecombe
2764868b104dSRick Edgecombe /*
2765868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if
2766868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and
2767868b104dSRick Edgecombe * reset the direct map permissions to the default.
2768868b104dSRick Edgecombe */
2769868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush);
277031e67340SRick Edgecombe _vm_unmap_aliases(start, end, flush_dmap);
2771868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush);
2772868b104dSRick Edgecombe }
2773868b104dSRick Edgecombe
delayed_vfree_work(struct work_struct * w)2774208162f4SChristoph Hellwig static void delayed_vfree_work(struct work_struct *w)
27751da177e4SLinus Torvalds {
2776208162f4SChristoph Hellwig struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
2777208162f4SChristoph Hellwig struct llist_node *t, *llnode;
27781da177e4SLinus Torvalds
2779208162f4SChristoph Hellwig llist_for_each_safe(llnode, t, llist_del_all(&p->list))
27805d3d31d6SChristoph Hellwig vfree(llnode);
2781bf22e37aSAndrey Ryabinin }
2782bf22e37aSAndrey Ryabinin
2783bf22e37aSAndrey Ryabinin /**
2784bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc()
2785bf22e37aSAndrey Ryabinin * @addr: memory base address
2786bf22e37aSAndrey Ryabinin *
2787bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context
2788bf22e37aSAndrey Ryabinin * except NMIs.
2789bf22e37aSAndrey Ryabinin */
vfree_atomic(const void * addr)2790bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr)
2791bf22e37aSAndrey Ryabinin {
279201e2e839SChristoph Hellwig struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2793bf22e37aSAndrey Ryabinin
279401e2e839SChristoph Hellwig BUG_ON(in_nmi());
2795bf22e37aSAndrey Ryabinin kmemleak_free(addr);
2796bf22e37aSAndrey Ryabinin
279701e2e839SChristoph Hellwig /*
279801e2e839SChristoph Hellwig * Use raw_cpu_ptr() because this can be called from preemptible
279901e2e839SChristoph Hellwig * context. Preemption is absolutely fine here, because the llist_add()
280001e2e839SChristoph Hellwig * implementation is lockless, so it works even if we are adding to
280101e2e839SChristoph Hellwig * another cpu's list. schedule_work() should be fine with this too.
280201e2e839SChristoph Hellwig */
280301e2e839SChristoph Hellwig if (addr && llist_add((struct llist_node *)addr, &p->list))
280401e2e839SChristoph Hellwig schedule_work(&p->wq);
2805c67dc624SRoman Penyaev }
2806c67dc624SRoman Penyaev
28071da177e4SLinus Torvalds /**
2808fa307474SMatthew Wilcox (Oracle) * vfree - Release memory allocated by vmalloc()
2809fa307474SMatthew Wilcox (Oracle) * @addr: Memory base address
28101da177e4SLinus Torvalds *
2811fa307474SMatthew Wilcox (Oracle) * Free the virtually continuous memory area starting at @addr, as obtained
2812fa307474SMatthew Wilcox (Oracle) * from one of the vmalloc() family of APIs. This will usually also free the
2813fa307474SMatthew Wilcox (Oracle) * physical memory underlying the virtual allocation, but that memory is
2814fa307474SMatthew Wilcox (Oracle) * reference counted, so it will not be freed until the last user goes away.
28151da177e4SLinus Torvalds *
2816fa307474SMatthew Wilcox (Oracle) * If @addr is NULL, no operation is performed.
281732fcfd40SAl Viro *
2818fa307474SMatthew Wilcox (Oracle) * Context:
28193ca4ea3aSAndrey Ryabinin * May sleep if called *not* from interrupt context.
2820fa307474SMatthew Wilcox (Oracle) * Must not be called in NMI context (strictly speaking, it could be
2821fa307474SMatthew Wilcox (Oracle) * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2822f0953a1bSIngo Molnar * conventions for vfree() arch-dependent would be a really bad idea).
28231da177e4SLinus Torvalds */
vfree(const void * addr)2824b3bdda02SChristoph Lameter void vfree(const void *addr)
28251da177e4SLinus Torvalds {
282679311c1fSChristoph Hellwig struct vm_struct *vm;
282779311c1fSChristoph Hellwig int i;
282879311c1fSChristoph Hellwig
282901e2e839SChristoph Hellwig if (unlikely(in_interrupt())) {
283001e2e839SChristoph Hellwig vfree_atomic(addr);
283132fcfd40SAl Viro return;
283201e2e839SChristoph Hellwig }
283301e2e839SChristoph Hellwig
28341da177e4SLinus Torvalds BUG_ON(in_nmi());
283589219d37SCatalin Marinas kmemleak_free(addr);
283601e2e839SChristoph Hellwig might_sleep();
283732fcfd40SAl Viro
2838bf22e37aSAndrey Ryabinin if (!addr)
2839bf22e37aSAndrey Ryabinin return;
2840c67dc624SRoman Penyaev
284179311c1fSChristoph Hellwig vm = remove_vm_area(addr);
284279311c1fSChristoph Hellwig if (unlikely(!vm)) {
284379311c1fSChristoph Hellwig WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
284479311c1fSChristoph Hellwig addr);
284579311c1fSChristoph Hellwig return;
284679311c1fSChristoph Hellwig }
284779311c1fSChristoph Hellwig
28489e5fa0aeSChristoph Hellwig if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
28499e5fa0aeSChristoph Hellwig vm_reset_perms(vm);
285079311c1fSChristoph Hellwig for (i = 0; i < vm->nr_pages; i++) {
285179311c1fSChristoph Hellwig struct page *page = vm->pages[i];
285279311c1fSChristoph Hellwig
285379311c1fSChristoph Hellwig BUG_ON(!page);
2854*90ae5b7aSMatthew Wilcox (Oracle) if (!(vm->flags & VM_MAP_PUT_PAGES))
285579311c1fSChristoph Hellwig mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
285679311c1fSChristoph Hellwig /*
285779311c1fSChristoph Hellwig * High-order allocs for huge vmallocs are split, so they
285879311c1fSChristoph Hellwig * can be freed as an array of order-0 allocations
285979311c1fSChristoph Hellwig */
2860dcc1be11SLorenzo Stoakes __free_page(page);
286179311c1fSChristoph Hellwig cond_resched();
286279311c1fSChristoph Hellwig }
2863*90ae5b7aSMatthew Wilcox (Oracle) if (!(vm->flags & VM_MAP_PUT_PAGES))
286479311c1fSChristoph Hellwig atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
286579311c1fSChristoph Hellwig kvfree(vm->pages);
286679311c1fSChristoph Hellwig kfree(vm);
28671da177e4SLinus Torvalds }
28681da177e4SLinus Torvalds EXPORT_SYMBOL(vfree);
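
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the common vmalloc()/vfree() pairing. The buffer size is arbitrary.
 */
static void __maybe_unused example_vfree_usage(void)
{
        void *buf = vmalloc(64 * 1024);

        if (!buf)
                return;

        /* ... use the virtually contiguous buffer ... */

        vfree(buf);     /* may sleep; use vfree_atomic() from atomic context */
}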
28691da177e4SLinus Torvalds
28701da177e4SLinus Torvalds /**
28711da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap()
28721da177e4SLinus Torvalds * @addr: memory base address
28731da177e4SLinus Torvalds *
28741da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr,
28751da177e4SLinus Torvalds * which was created from the page array passed to vmap().
28761da177e4SLinus Torvalds *
287780e93effSPekka Enberg * Must not be called in interrupt context.
28781da177e4SLinus Torvalds */
vunmap(const void * addr)2879b3bdda02SChristoph Lameter void vunmap(const void *addr)
28801da177e4SLinus Torvalds {
288179311c1fSChristoph Hellwig struct vm_struct *vm;
288279311c1fSChristoph Hellwig
28831da177e4SLinus Torvalds BUG_ON(in_interrupt());
288434754b69SPeter Zijlstra might_sleep();
288579311c1fSChristoph Hellwig
288679311c1fSChristoph Hellwig if (!addr)
288779311c1fSChristoph Hellwig return;
288879311c1fSChristoph Hellwig vm = remove_vm_area(addr);
288979311c1fSChristoph Hellwig if (unlikely(!vm)) {
289079311c1fSChristoph Hellwig WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
289179311c1fSChristoph Hellwig addr);
289279311c1fSChristoph Hellwig return;
289379311c1fSChristoph Hellwig }
289479311c1fSChristoph Hellwig kfree(vm);
28951da177e4SLinus Torvalds }
28961da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap);
28971da177e4SLinus Torvalds
28981da177e4SLinus Torvalds /**
28991da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space
29001da177e4SLinus Torvalds * @pages: array of page pointers
29011da177e4SLinus Torvalds * @count: number of pages to map
29021da177e4SLinus Torvalds * @flags: vm_area->flags
29031da177e4SLinus Torvalds * @prot: page protection for the mapping
29041da177e4SLinus Torvalds *
2905b944afc9SChristoph Hellwig * Maps @count pages from @pages into contiguous kernel virtual space.
2906b944afc9SChristoph Hellwig * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2907b944afc9SChristoph Hellwig * (which must be kmalloc or vmalloc memory) and one reference per page in it
2908b944afc9SChristoph Hellwig * are transferred from the caller to vmap(), and will be freed / dropped when
2909b944afc9SChristoph Hellwig * vfree() is called on the return value.
2910a862f68aSMike Rapoport *
2911a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure
29121da177e4SLinus Torvalds */
vmap(struct page ** pages,unsigned int count,unsigned long flags,pgprot_t prot)29131da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count,
29141da177e4SLinus Torvalds unsigned long flags, pgprot_t prot)
29151da177e4SLinus Torvalds {
29161da177e4SLinus Torvalds struct vm_struct *area;
2917b67177ecSNicholas Piggin unsigned long addr;
291865ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */
29191da177e4SLinus Torvalds
292034754b69SPeter Zijlstra might_sleep();
292134754b69SPeter Zijlstra
292237f3605eSChristoph Hellwig if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
292337f3605eSChristoph Hellwig return NULL;
292437f3605eSChristoph Hellwig
2925bd1a8fb2SPeter Zijlstra /*
2926bd1a8fb2SPeter Zijlstra * Your top guard is someone else's bottom guard. Not having a top
2927bd1a8fb2SPeter Zijlstra * guard compromises someone else's mappings too.
2928bd1a8fb2SPeter Zijlstra */
2929bd1a8fb2SPeter Zijlstra if (WARN_ON_ONCE(flags & VM_NO_GUARD))
2930bd1a8fb2SPeter Zijlstra flags &= ~VM_NO_GUARD;
2931bd1a8fb2SPeter Zijlstra
2932ca79b0c2SArun KS if (count > totalram_pages())
29331da177e4SLinus Torvalds return NULL;
29341da177e4SLinus Torvalds
293565ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT;
293665ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0));
29371da177e4SLinus Torvalds if (!area)
29381da177e4SLinus Torvalds return NULL;
293923016969SChristoph Lameter
2940b67177ecSNicholas Piggin addr = (unsigned long)area->addr;
2941b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2942b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) {
29431da177e4SLinus Torvalds vunmap(area->addr);
29441da177e4SLinus Torvalds return NULL;
29451da177e4SLinus Torvalds }
29461da177e4SLinus Torvalds
2947c22ee528SMiaohe Lin if (flags & VM_MAP_PUT_PAGES) {
2948b944afc9SChristoph Hellwig area->pages = pages;
2949c22ee528SMiaohe Lin area->nr_pages = count;
2950c22ee528SMiaohe Lin }
29511da177e4SLinus Torvalds return area->addr;
29521da177e4SLinus Torvalds }
29531da177e4SLinus Torvalds EXPORT_SYMBOL(vmap);
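/*
 * A minimal usage sketch (error handling omitted): a typical vmap()
 * caller allocates its pages individually and maps them as one
 * virtually contiguous block, undoing it with vunmap():
 *
 *	struct page *pages[4];
 *	void *vaddr;
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(pages); i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	vaddr = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
 *	...
 *	vunmap(vaddr);
 *	for (i = 0; i < ARRAY_SIZE(pages); i++)
 *		__free_page(pages[i]);
 */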
29541da177e4SLinus Torvalds
29553e9a9e25SChristoph Hellwig #ifdef CONFIG_VMAP_PFN
29563e9a9e25SChristoph Hellwig struct vmap_pfn_data {
29573e9a9e25SChristoph Hellwig unsigned long *pfns;
29583e9a9e25SChristoph Hellwig pgprot_t prot;
29593e9a9e25SChristoph Hellwig unsigned int idx;
29603e9a9e25SChristoph Hellwig };
29613e9a9e25SChristoph Hellwig
vmap_pfn_apply(pte_t * pte,unsigned long addr,void * private)29623e9a9e25SChristoph Hellwig static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
29633e9a9e25SChristoph Hellwig {
29643e9a9e25SChristoph Hellwig struct vmap_pfn_data *data = private;
2965b3f78e74SRyan Roberts unsigned long pfn = data->pfns[data->idx];
2966b3f78e74SRyan Roberts pte_t ptent;
29673e9a9e25SChristoph Hellwig
2968b3f78e74SRyan Roberts if (WARN_ON_ONCE(pfn_valid(pfn)))
29693e9a9e25SChristoph Hellwig return -EINVAL;
2970b3f78e74SRyan Roberts
2971b3f78e74SRyan Roberts ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
2972b3f78e74SRyan Roberts set_pte_at(&init_mm, addr, pte, ptent);
2973b3f78e74SRyan Roberts
2974b3f78e74SRyan Roberts data->idx++;
29753e9a9e25SChristoph Hellwig return 0;
29763e9a9e25SChristoph Hellwig }
29773e9a9e25SChristoph Hellwig
29783e9a9e25SChristoph Hellwig /**
29793e9a9e25SChristoph Hellwig * vmap_pfn - map an array of PFNs into virtually contiguous space
29803e9a9e25SChristoph Hellwig * @pfns: array of PFNs
29813e9a9e25SChristoph Hellwig * @count: number of pages to map
29823e9a9e25SChristoph Hellwig * @prot: page protection for the mapping
29833e9a9e25SChristoph Hellwig *
29843e9a9e25SChristoph Hellwig * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
29853e9a9e25SChristoph Hellwig * the start address of the mapping.
29863e9a9e25SChristoph Hellwig */
vmap_pfn(unsigned long * pfns,unsigned int count,pgprot_t prot)29873e9a9e25SChristoph Hellwig void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
29883e9a9e25SChristoph Hellwig {
29893e9a9e25SChristoph Hellwig struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
29903e9a9e25SChristoph Hellwig struct vm_struct *area;
29913e9a9e25SChristoph Hellwig
29923e9a9e25SChristoph Hellwig area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
29933e9a9e25SChristoph Hellwig __builtin_return_address(0));
29943e9a9e25SChristoph Hellwig if (!area)
29953e9a9e25SChristoph Hellwig return NULL;
29963e9a9e25SChristoph Hellwig if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
29973e9a9e25SChristoph Hellwig count * PAGE_SIZE, vmap_pfn_apply, &data)) {
29983e9a9e25SChristoph Hellwig free_vm_area(area);
29993e9a9e25SChristoph Hellwig return NULL;
30003e9a9e25SChristoph Hellwig }
3001a50420c7SAlexandre Ghiti
3002a50420c7SAlexandre Ghiti flush_cache_vmap((unsigned long)area->addr,
3003a50420c7SAlexandre Ghiti (unsigned long)area->addr + count * PAGE_SIZE);
3004a50420c7SAlexandre Ghiti
30053e9a9e25SChristoph Hellwig return area->addr;
30063e9a9e25SChristoph Hellwig }
30073e9a9e25SChristoph Hellwig EXPORT_SYMBOL_GPL(vmap_pfn);
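/*
 * A minimal usage sketch: vmap_pfn() is intended for PFNs that have no
 * struct page (pfn_valid() must be false for them), e.g. device memory
 * carved out by a driver. A hypothetical caller might do:
 *
 *	unsigned long pfns[2] = { base_pfn, base_pfn + 1 };
 *	void *vaddr;
 *
 *	vaddr = vmap_pfn(pfns, ARRAY_SIZE(pfns),
 *			 pgprot_writecombine(PAGE_KERNEL));
 *	if (!vaddr)
 *		return -ENOMEM;
 *	...
 *	vunmap(vaddr);
 *
 * where "base_pfn" stands in for whatever range the driver owns.
 */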
30083e9a9e25SChristoph Hellwig #endif /* CONFIG_VMAP_PFN */
30093e9a9e25SChristoph Hellwig
301012b9f873SUladzislau Rezki static inline unsigned int
vm_area_alloc_pages(gfp_t gfp,int nid,unsigned int order,unsigned int nr_pages,struct page ** pages)301112b9f873SUladzislau Rezki vm_area_alloc_pages(gfp_t gfp, int nid,
3012343ab817SUladzislau Rezki (Sony) unsigned int order, unsigned int nr_pages, struct page **pages)
301312b9f873SUladzislau Rezki {
301412b9f873SUladzislau Rezki unsigned int nr_allocated = 0;
3015e9c3cda4SMichal Hocko gfp_t alloc_gfp = gfp;
3016c55d3564SHailong.Liu bool nofail = gfp & __GFP_NOFAIL;
3017ffb29b1cSChen Wandun struct page *page;
3018ffb29b1cSChen Wandun int i;
301912b9f873SUladzislau Rezki
302012b9f873SUladzislau Rezki /*
302112b9f873SUladzislau Rezki * For order-0 pages we make use of the bulk allocator. If
302212b9f873SUladzislau Rezki * the page array ends up only partly populated, or not
302312b9f873SUladzislau Rezki * populated at all, due to failures, fall back to the
302412b9f873SUladzislau Rezki * single page allocator, which is more permissive.
302512b9f873SUladzislau Rezki */
3026c00b6b96SChen Wandun if (!order) {
3027e9c3cda4SMichal Hocko /* the bulk allocator doesn't officially support nofail requests */
30289376130cSMichal Hocko gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
30299376130cSMichal Hocko
3030343ab817SUladzislau Rezki (Sony) while (nr_allocated < nr_pages) {
3031343ab817SUladzislau Rezki (Sony) unsigned int nr, nr_pages_request;
3032343ab817SUladzislau Rezki (Sony)
3033343ab817SUladzislau Rezki (Sony) /*
3034343ab817SUladzislau Rezki (Sony) * The maximum allowed request is hard-coded to 100 pages
3035343ab817SUladzislau Rezki (Sony) * per call. That is done in order to prevent a long
3036343ab817SUladzislau Rezki (Sony) * preemption-off scenario in the bulk allocator, so the
3037343ab817SUladzislau Rezki (Sony) * range is [1:100].
3038343ab817SUladzislau Rezki (Sony) */
3039343ab817SUladzislau Rezki (Sony) nr_pages_request = min(100U, nr_pages - nr_allocated);
3040343ab817SUladzislau Rezki (Sony)
3041c00b6b96SChen Wandun /* memory allocation should consider mempolicy: we can't
3042c00b6b96SChen Wandun * wrongly use the nearest node when nid == NUMA_NO_NODE,
3043c00b6b96SChen Wandun * otherwise memory may be allocated on only one node
304498af39d5SYixuan Cao * while the mempolicy wants memory interleaved across nodes.
3045c00b6b96SChen Wandun */
3046c00b6b96SChen Wandun if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
30479376130cSMichal Hocko nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
3048c00b6b96SChen Wandun nr_pages_request,
3049c00b6b96SChen Wandun pages + nr_allocated);
3050c00b6b96SChen Wandun
3051c00b6b96SChen Wandun else
30529376130cSMichal Hocko nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
3053c00b6b96SChen Wandun nr_pages_request,
3054c00b6b96SChen Wandun pages + nr_allocated);
3055343ab817SUladzislau Rezki (Sony)
3056343ab817SUladzislau Rezki (Sony) nr_allocated += nr;
3057343ab817SUladzislau Rezki (Sony) cond_resched();
3058343ab817SUladzislau Rezki (Sony)
3059343ab817SUladzislau Rezki (Sony) /*
3060343ab817SUladzislau Rezki (Sony) * If no pages, or only some of them, were obtained,
3061343ab817SUladzislau Rezki (Sony) * fall back to the single page allocator.
3062343ab817SUladzislau Rezki (Sony) */
3063343ab817SUladzislau Rezki (Sony) if (nr != nr_pages_request)
3064343ab817SUladzislau Rezki (Sony) break;
3065343ab817SUladzislau Rezki (Sony) }
3066e9c3cda4SMichal Hocko } else if (gfp & __GFP_NOFAIL) {
3067e9c3cda4SMichal Hocko /*
3068e9c3cda4SMichal Hocko * Higher order nofail allocations are really expensive and
3069e9c3cda4SMichal Hocko * potentially dangerous (premature OOM, disruptive reclaim,
3070e9c3cda4SMichal Hocko * compaction, etc.), so drop __GFP_NOFAIL here.
3071e9c3cda4SMichal Hocko */
3072e9c3cda4SMichal Hocko alloc_gfp &= ~__GFP_NOFAIL;
30733b8000aeSNicholas Piggin }
307412b9f873SUladzislau Rezki
307512b9f873SUladzislau Rezki /* High-order pages or fallback path if "bulk" fails. */
3076ffb29b1cSChen Wandun while (nr_allocated < nr_pages) {
3077c55d3564SHailong.Liu if (!nofail && fatal_signal_pending(current))
3078dd544141SVasily Averin break;
3079dd544141SVasily Averin
3080ffb29b1cSChen Wandun if (nid == NUMA_NO_NODE)
3081e9c3cda4SMichal Hocko page = alloc_pages(alloc_gfp, order);
3082ffb29b1cSChen Wandun else
3083e9c3cda4SMichal Hocko page = alloc_pages_node(nid, alloc_gfp, order);
3084de7bad86SHailong Liu if (unlikely(!page))
308512b9f873SUladzislau Rezki break;
3086e9c3cda4SMichal Hocko
30873b8000aeSNicholas Piggin /*
30883b8000aeSNicholas Piggin * Higher order allocations must be able to be treated as
30893b8000aeSNicholas Piggin * independent small pages by callers (as they can with
30903b8000aeSNicholas Piggin * small-page vmallocs). Some drivers do their own refcounting
30913b8000aeSNicholas Piggin * on vmalloc_to_page() pages, some use page->mapping,
30923b8000aeSNicholas Piggin * page->lru, etc.
30933b8000aeSNicholas Piggin */
30943b8000aeSNicholas Piggin if (order)
30953b8000aeSNicholas Piggin split_page(page, order);
309612b9f873SUladzislau Rezki
309712b9f873SUladzislau Rezki /*
309812b9f873SUladzislau Rezki * Careful, we allocate and map page-order pages, but
309912b9f873SUladzislau Rezki * tracking is done per PAGE_SIZE page so as to keep the
310012b9f873SUladzislau Rezki * vm_struct APIs independent of the physical/mapped size.
310112b9f873SUladzislau Rezki */
310212b9f873SUladzislau Rezki for (i = 0; i < (1U << order); i++)
310312b9f873SUladzislau Rezki pages[nr_allocated + i] = page + i;
310412b9f873SUladzislau Rezki
310512b9f873SUladzislau Rezki cond_resched();
310612b9f873SUladzislau Rezki nr_allocated += 1U << order;
310712b9f873SUladzislau Rezki }
310812b9f873SUladzislau Rezki
310912b9f873SUladzislau Rezki return nr_allocated;
311012b9f873SUladzislau Rezki }
311112b9f873SUladzislau Rezki
__vmalloc_area_node(struct vm_struct * area,gfp_t gfp_mask,pgprot_t prot,unsigned int page_shift,int node)3112e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3113121e6f32SNicholas Piggin pgprot_t prot, unsigned int page_shift,
3114121e6f32SNicholas Piggin int node)
31151da177e4SLinus Torvalds {
3116930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
31179376130cSMichal Hocko bool nofail = gfp_mask & __GFP_NOFAIL;
3118121e6f32SNicholas Piggin unsigned long addr = (unsigned long)area->addr;
3119121e6f32SNicholas Piggin unsigned long size = get_vm_area_size(area);
312034fe6537SAndrew Morton unsigned long array_size;
3121121e6f32SNicholas Piggin unsigned int nr_small_pages = size >> PAGE_SHIFT;
3122121e6f32SNicholas Piggin unsigned int page_order;
3123451769ebSMichal Hocko unsigned int flags;
3124451769ebSMichal Hocko int ret;
31251da177e4SLinus Torvalds
3126121e6f32SNicholas Piggin array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
312780b1d8fdSLorenzo Stoakes
3128f255935bSChristoph Hellwig if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3129f255935bSChristoph Hellwig gfp_mask |= __GFP_HIGHMEM;
31301da177e4SLinus Torvalds
31311da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. */
31328757d5faSJan Kiszka if (array_size > PAGE_SIZE) {
31335c1f4e69SUladzislau Rezki (Sony) area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
3134f255935bSChristoph Hellwig area->caller);
3135286e1ea3SAndrew Morton } else {
31365c1f4e69SUladzislau Rezki (Sony) area->pages = kmalloc_node(array_size, nested_gfp, node);
3137286e1ea3SAndrew Morton }
31387ea36242SAustin Kim
31395c1f4e69SUladzislau Rezki (Sony) if (!area->pages) {
3140c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL,
3141f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, failed to allocate page array of size %lu",
3142d70bec8cSNicholas Piggin nr_small_pages * PAGE_SIZE, array_size);
3143cd61413bSUladzislau Rezki (Sony) free_vm_area(area);
31441da177e4SLinus Torvalds return NULL;
31451da177e4SLinus Torvalds }
31461da177e4SLinus Torvalds
3147121e6f32SNicholas Piggin set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3148121e6f32SNicholas Piggin page_order = vm_area_page_order(area);
3149121e6f32SNicholas Piggin
3150c3d77172SUladzislau Rezki (Sony) area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
3151c3d77172SUladzislau Rezki (Sony) node, page_order, nr_small_pages, area->pages);
31525c1f4e69SUladzislau Rezki (Sony)
315397105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
31544e5aa1f4SShakeel Butt if (gfp_mask & __GFP_ACCOUNT) {
31553b8000aeSNicholas Piggin int i;
31564e5aa1f4SShakeel Butt
31573b8000aeSNicholas Piggin for (i = 0; i < area->nr_pages; i++)
31583b8000aeSNicholas Piggin mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
31594e5aa1f4SShakeel Butt }
31605c1f4e69SUladzislau Rezki (Sony)
31615c1f4e69SUladzislau Rezki (Sony) /*
31625c1f4e69SUladzislau Rezki (Sony) * If not enough pages were obtained to satisfy the
3163f41f036bSChristoph Hellwig * allocation request, free whatever was allocated via vfree().
31645c1f4e69SUladzislau Rezki (Sony) */
31655c1f4e69SUladzislau Rezki (Sony) if (area->nr_pages != nr_small_pages) {
316695a301eeSLorenzo Stoakes /*
316795a301eeSLorenzo Stoakes * vm_area_alloc_pages() can fail due to insufficient memory, but
316895a301eeSLorenzo Stoakes * also because of:
316995a301eeSLorenzo Stoakes *
317095a301eeSLorenzo Stoakes * - a pending fatal signal
317195a301eeSLorenzo Stoakes * - insufficient huge page-order pages
317295a301eeSLorenzo Stoakes *
317395a301eeSLorenzo Stoakes * Since we always retry allocations at order-0 in the huge page
317495a301eeSLorenzo Stoakes * case, a warning for either is spurious.
317595a301eeSLorenzo Stoakes */
317695a301eeSLorenzo Stoakes if (!fatal_signal_pending(current) && page_order == 0)
3177c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL,
317895a301eeSLorenzo Stoakes "vmalloc error: size %lu, failed to allocate pages",
317995a301eeSLorenzo Stoakes area->nr_pages * PAGE_SIZE);
31801da177e4SLinus Torvalds goto fail;
31811da177e4SLinus Torvalds }
3182121e6f32SNicholas Piggin
3183451769ebSMichal Hocko /*
3184451769ebSMichal Hocko * page table allocations ignore the external gfp mask, so enforce
3185451769ebSMichal Hocko * it via the scope API
3186451769ebSMichal Hocko */
3187451769ebSMichal Hocko if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3188451769ebSMichal Hocko flags = memalloc_nofs_save();
3189451769ebSMichal Hocko else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3190451769ebSMichal Hocko flags = memalloc_noio_save();
3191451769ebSMichal Hocko
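	/*
	 * vmap_pages_range() can fail transiently; for __GFP_NOFAIL
	 * requests keep retrying with a short sleep until the mapping
	 * succeeds.
	 */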
31929376130cSMichal Hocko do {
3193451769ebSMichal Hocko ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3194451769ebSMichal Hocko page_shift);
31959376130cSMichal Hocko if (nofail && (ret < 0))
31969376130cSMichal Hocko schedule_timeout_uninterruptible(1);
31979376130cSMichal Hocko } while (nofail && (ret < 0));
3198451769ebSMichal Hocko
3199451769ebSMichal Hocko if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3200451769ebSMichal Hocko memalloc_nofs_restore(flags);
3201451769ebSMichal Hocko else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3202451769ebSMichal Hocko memalloc_noio_restore(flags);
3203451769ebSMichal Hocko
3204451769ebSMichal Hocko if (ret < 0) {
3205c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL,
3206f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, failed to map pages",
3207d70bec8cSNicholas Piggin area->nr_pages * PAGE_SIZE);
32081da177e4SLinus Torvalds goto fail;
3209d70bec8cSNicholas Piggin }
3210ed1f324cSChristoph Hellwig
32111da177e4SLinus Torvalds return area->addr;
32121da177e4SLinus Torvalds
32131da177e4SLinus Torvalds fail:
3214f41f036bSChristoph Hellwig vfree(area->addr);
32151da177e4SLinus Torvalds return NULL;
32161da177e4SLinus Torvalds }
32171da177e4SLinus Torvalds
3218d0a21265SDavid Rientjes /**
3219d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory
3220d0a21265SDavid Rientjes * @size: allocation size
3221d0a21265SDavid Rientjes * @align: desired alignment
3222d0a21265SDavid Rientjes * @start: vm area range start
3223d0a21265SDavid Rientjes * @end: vm area range end
3224d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator
3225d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages
3226cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
322700ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE
3228d0a21265SDavid Rientjes * @caller: caller's return address
3229d0a21265SDavid Rientjes *
3230d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level
3231b7d90e7aSMichal Hocko * allocator with @gfp_mask flags. Please note that the full set of gfp
323230d3f011SMichal Hocko * flags is not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
323330d3f011SMichal Hocko * supported.
323430d3f011SMichal Hocko * Zone modifiers are not supported. From the reclaim modifiers
323530d3f011SMichal Hocko * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
323630d3f011SMichal Hocko * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
323730d3f011SMichal Hocko * __GFP_RETRY_MAYFAIL are not supported).
323830d3f011SMichal Hocko *
323930d3f011SMichal Hocko * __GFP_NOWARN can be used to suppress failure messages.
3240b7d90e7aSMichal Hocko *
3241b7d90e7aSMichal Hocko * Map them into contiguous kernel virtual space, using a pagetable
3242b7d90e7aSMichal Hocko * protection of @prot.
3243a862f68aSMike Rapoport *
3244a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure
3245d0a21265SDavid Rientjes */
__vmalloc_node_range(unsigned long size,unsigned long align,unsigned long start,unsigned long end,gfp_t gfp_mask,pgprot_t prot,unsigned long vm_flags,int node,const void * caller)3246d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align,
3247d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask,
3248cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node,
3249cb9e3c29SAndrey Ryabinin const void *caller)
3250930fc45aSChristoph Lameter {
3251d0a21265SDavid Rientjes struct vm_struct *area;
325219f1c3acSAndrey Konovalov void *ret;
3253f6e39794SAndrey Konovalov kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3254d0a21265SDavid Rientjes unsigned long real_size = size;
3255121e6f32SNicholas Piggin unsigned long real_align = align;
3256121e6f32SNicholas Piggin unsigned int shift = PAGE_SHIFT;
3257d0a21265SDavid Rientjes
3258d70bec8cSNicholas Piggin if (WARN_ON_ONCE(!size))
3259d70bec8cSNicholas Piggin return NULL;
3260d70bec8cSNicholas Piggin
3261d70bec8cSNicholas Piggin if ((size >> PAGE_SHIFT) > totalram_pages()) {
3262d70bec8cSNicholas Piggin warn_alloc(gfp_mask, NULL,
3263f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, exceeds total pages",
3264f4bdfeafSUladzislau Rezki (Sony) real_size);
3265d70bec8cSNicholas Piggin return NULL;
3266121e6f32SNicholas Piggin }
3267d0a21265SDavid Rientjes
3268559089e0SSong Liu if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3269121e6f32SNicholas Piggin unsigned long size_per_node;
3270121e6f32SNicholas Piggin
3271121e6f32SNicholas Piggin /*
3272121e6f32SNicholas Piggin * Try huge pages. Only try for PAGE_KERNEL allocations;
3273121e6f32SNicholas Piggin * others like modules don't yet expect huge pages in
3274121e6f32SNicholas Piggin * their allocations due to apply_to_page_range not
3275121e6f32SNicholas Piggin * supporting them.
3276121e6f32SNicholas Piggin */
3277121e6f32SNicholas Piggin
3278121e6f32SNicholas Piggin size_per_node = size;
3279121e6f32SNicholas Piggin if (node == NUMA_NO_NODE)
3280121e6f32SNicholas Piggin size_per_node /= num_online_nodes();
32813382bbeeSChristophe Leroy if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3282121e6f32SNicholas Piggin shift = PMD_SHIFT;
32833382bbeeSChristophe Leroy else
32843382bbeeSChristophe Leroy shift = arch_vmap_pte_supported_shift(size_per_node);
32853382bbeeSChristophe Leroy
3286121e6f32SNicholas Piggin align = max(real_align, 1UL << shift);
3287121e6f32SNicholas Piggin size = ALIGN(real_size, 1UL << shift);
3288121e6f32SNicholas Piggin }
3289121e6f32SNicholas Piggin
3290121e6f32SNicholas Piggin again:
32917ca3027bSDaniel Axtens area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
32927ca3027bSDaniel Axtens VM_UNINITIALIZED | vm_flags, start, end, node,
32937ca3027bSDaniel Axtens gfp_mask, caller);
3294d70bec8cSNicholas Piggin if (!area) {
32959376130cSMichal Hocko bool nofail = gfp_mask & __GFP_NOFAIL;
3296d70bec8cSNicholas Piggin warn_alloc(gfp_mask, NULL,
32979376130cSMichal Hocko "vmalloc error: size %lu, vm_struct allocation failed%s",
32989376130cSMichal Hocko real_size, (nofail) ? ". Retrying." : "");
32999376130cSMichal Hocko if (nofail) {
33009376130cSMichal Hocko schedule_timeout_uninterruptible(1);
33019376130cSMichal Hocko goto again;
33029376130cSMichal Hocko }
3303de7d2b56SJoe Perches goto fail;
3304d70bec8cSNicholas Piggin }
3305d0a21265SDavid Rientjes
3306f6e39794SAndrey Konovalov /*
3307f6e39794SAndrey Konovalov * Prepare arguments for __vmalloc_area_node() and
3308f6e39794SAndrey Konovalov * kasan_unpoison_vmalloc().
3309f6e39794SAndrey Konovalov */
3310f6e39794SAndrey Konovalov if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3311f6e39794SAndrey Konovalov if (kasan_hw_tags_enabled()) {
331201d92c7fSAndrey Konovalov /*
331301d92c7fSAndrey Konovalov * Modify protection bits to allow tagging.
3314f6e39794SAndrey Konovalov * This must be done before mapping.
331501d92c7fSAndrey Konovalov */
331601d92c7fSAndrey Konovalov prot = arch_vmap_pgprot_tagged(prot);
331701d92c7fSAndrey Konovalov
331823689e91SAndrey Konovalov /*
3319f6e39794SAndrey Konovalov * Skip page_alloc poisoning and zeroing for physical
3320f6e39794SAndrey Konovalov * pages backing VM_ALLOC mapping. Memory is instead
3321f6e39794SAndrey Konovalov * poisoned and zeroed by kasan_unpoison_vmalloc().
332223689e91SAndrey Konovalov */
33230a54864fSPeter Collingbourne gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
332423689e91SAndrey Konovalov }
332523689e91SAndrey Konovalov
3326f6e39794SAndrey Konovalov /* Take note that the mapping is PAGE_KERNEL. */
3327f6e39794SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3328f6e39794SAndrey Konovalov }
3329f6e39794SAndrey Konovalov
333001d92c7fSAndrey Konovalov /* Allocate physical pages and map them into vmalloc space. */
333119f1c3acSAndrey Konovalov ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
333219f1c3acSAndrey Konovalov if (!ret)
3333121e6f32SNicholas Piggin goto fail;
333489219d37SCatalin Marinas
333523689e91SAndrey Konovalov /*
333623689e91SAndrey Konovalov * Mark the pages as accessible, now that they are mapped.
33376c2f761dSAndrey Konovalov * The condition for setting KASAN_VMALLOC_INIT should complement the
33386c2f761dSAndrey Konovalov * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
33396c2f761dSAndrey Konovalov * to make sure that memory is initialized under the same conditions.
3340f6e39794SAndrey Konovalov * Tag-based KASAN modes only assign tags to normal non-executable
3341f6e39794SAndrey Konovalov * allocations, see __kasan_unpoison_vmalloc().
334223689e91SAndrey Konovalov */
3343f6e39794SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
33446c2f761dSAndrey Konovalov if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
33456c2f761dSAndrey Konovalov (gfp_mask & __GFP_SKIP_ZERO))
334623689e91SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_INIT;
3347f6e39794SAndrey Konovalov /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
334823689e91SAndrey Konovalov area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
334919f1c3acSAndrey Konovalov
335089219d37SCatalin Marinas /*
335120fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED
335220fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized.
33534341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here.
3354f5252e00SMitsuo Hayasaka */
335520fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area);
3356f5252e00SMitsuo Hayasaka
33577ca3027bSDaniel Axtens size = PAGE_ALIGN(size);
335860115fa5SKefeng Wang if (!(vm_flags & VM_DEFER_KMEMLEAK))
335994f4a161SCatalin Marinas kmemleak_vmalloc(area, size, gfp_mask);
336089219d37SCatalin Marinas
336119f1c3acSAndrey Konovalov return area->addr;
3362de7d2b56SJoe Perches
3363de7d2b56SJoe Perches fail:
3364121e6f32SNicholas Piggin if (shift > PAGE_SHIFT) {
3365121e6f32SNicholas Piggin shift = PAGE_SHIFT;
3366121e6f32SNicholas Piggin align = real_align;
3367121e6f32SNicholas Piggin size = real_size;
3368121e6f32SNicholas Piggin goto again;
3369121e6f32SNicholas Piggin }
3370121e6f32SNicholas Piggin
3371de7d2b56SJoe Perches return NULL;
3372930fc45aSChristoph Lameter }
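
/*
 * A minimal usage sketch for __vmalloc_node_range(): callers that need a
 * specific VA window or protection (for example, an arch's module loader)
 * supply the full argument list. MODULE_ALIGN, MODULES_VADDR/MODULES_END
 * and PAGE_KERNEL_EXEC below are arch-provided and shown only as an
 * assumed example:
 *
 *	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR,
 *				 MODULES_END, GFP_KERNEL, PAGE_KERNEL_EXEC,
 *				 0, NUMA_NO_NODE,
 *				 __builtin_return_address(0));
 */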
3373930fc45aSChristoph Lameter
33741da177e4SLinus Torvalds /**
3375930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory
33761da177e4SLinus Torvalds * @size: allocation size
33772dca6999SDavid Miller * @align: desired alignment
33781da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator
337900ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE
3380c85d194bSRandy Dunlap * @caller: caller's return address
33811da177e4SLinus Torvalds *
3382f38fcb9cSChristoph Hellwig * Allocate enough pages to cover @size from the page level allocator with
3383f38fcb9cSChristoph Hellwig * @gfp_mask flags. Map them into contiguous kernel virtual space.
3384a7c3e901SMichal Hocko *
3385dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3386a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported.
3387a7c3e901SMichal Hocko *
3388a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be discussed
3389a7c3e901SMichal Hocko * with mm people.
3390a862f68aSMike Rapoport *
3391a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
33921da177e4SLinus Torvalds */
__vmalloc_node(unsigned long size,unsigned long align,gfp_t gfp_mask,int node,const void * caller)33932b905948SChristoph Hellwig void *__vmalloc_node(unsigned long size, unsigned long align,
3394f38fcb9cSChristoph Hellwig gfp_t gfp_mask, int node, const void *caller)
33951da177e4SLinus Torvalds {
3396d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3397f38fcb9cSChristoph Hellwig gfp_mask, PAGE_KERNEL, 0, node, caller);
33981da177e4SLinus Torvalds }
3399c3f896dcSChristoph Hellwig /*
3400c3f896dcSChristoph Hellwig * This export exists only for performance analysis and stress testing
3401c3f896dcSChristoph Hellwig * of vmalloc. It is required by the vmalloc test module; do not use it
3402c3f896dcSChristoph Hellwig * for anything else.
3403c3f896dcSChristoph Hellwig */
3404c3f896dcSChristoph Hellwig #ifdef CONFIG_TEST_VMALLOC_MODULE
3405c3f896dcSChristoph Hellwig EXPORT_SYMBOL_GPL(__vmalloc_node);
3406c3f896dcSChristoph Hellwig #endif
34071da177e4SLinus Torvalds
__vmalloc(unsigned long size,gfp_t gfp_mask)340888dca4caSChristoph Hellwig void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3409930fc45aSChristoph Lameter {
3410f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
341123016969SChristoph Lameter __builtin_return_address(0));
3412930fc45aSChristoph Lameter }
34131da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc);
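/*
 * A minimal usage sketch: __vmalloc() is the entry point when a
 * non-default gfp mask is needed, e.g. an allocation that must not
 * recurse into filesystem reclaim:
 *
 *	void *buf = __vmalloc(len, GFP_NOFS | __GFP_ZERO);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */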
34141da177e4SLinus Torvalds
34151da177e4SLinus Torvalds /**
34161da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory
34171da177e4SLinus Torvalds * @size: allocation size
341892eac168SMike Rapoport *
34191da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level
34201da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space.
34211da177e4SLinus Torvalds *
3422c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags
34231da177e4SLinus Torvalds * use __vmalloc() instead.
3424a862f68aSMike Rapoport *
3425a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
34261da177e4SLinus Torvalds */
vmalloc(unsigned long size)34271da177e4SLinus Torvalds void *vmalloc(unsigned long size)
34281da177e4SLinus Torvalds {
34294d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
34304d39d728SChristoph Hellwig __builtin_return_address(0));
34311da177e4SLinus Torvalds }
34321da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc);
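/*
 * A minimal usage sketch for the common case:
 *
 *	struct foo *table = vmalloc(array_size(nr, sizeof(*table)));
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 *
 * array_size() guards the size calculation against overflow; "struct foo"
 * is a placeholder for the caller's own type.
 */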
34331da177e4SLinus Torvalds
3434930fc45aSChristoph Lameter /**
3435559089e0SSong Liu * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
343615a64f5aSClaudio Imbrenda * @size: allocation size
3437559089e0SSong Liu * @gfp_mask: flags for the page level allocator
343815a64f5aSClaudio Imbrenda *
3439559089e0SSong Liu * Allocate enough pages to cover @size from the page level
344015a64f5aSClaudio Imbrenda * allocator and map them into contiguous kernel virtual space.
3441559089e0SSong Liu * If @size is greater than or equal to PMD_SIZE, allow using
3442559089e0SSong Liu * huge pages for the memory.
344315a64f5aSClaudio Imbrenda *
344415a64f5aSClaudio Imbrenda * Return: pointer to the allocated memory or %NULL on error
344515a64f5aSClaudio Imbrenda */
vmalloc_huge(unsigned long size,gfp_t gfp_mask)3446559089e0SSong Liu void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
344715a64f5aSClaudio Imbrenda {
344815a64f5aSClaudio Imbrenda return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3449559089e0SSong Liu gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
345015a64f5aSClaudio Imbrenda NUMA_NO_NODE, __builtin_return_address(0));
345115a64f5aSClaudio Imbrenda }
3452559089e0SSong Liu EXPORT_SYMBOL_GPL(vmalloc_huge);
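/*
 * A minimal usage sketch: large, long-lived allocations (e.g. big hash
 * tables) can opt in to huge mappings:
 *
 *	void *tbl = vmalloc_huge(64UL << 20, GFP_KERNEL);
 *
 * When the size or configuration does not allow huge pages, this behaves
 * like a regular vmalloc() of the same size.
 */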
345315a64f5aSClaudio Imbrenda
345415a64f5aSClaudio Imbrenda /**
3455e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill
3456e1ca7788SDave Young * @size: allocation size
345792eac168SMike Rapoport *
3458e1ca7788SDave Young * Allocate enough pages to cover @size from the page level
3459e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space.
3460e1ca7788SDave Young * The memory allocated is set to zero.
3461e1ca7788SDave Young *
3462e1ca7788SDave Young * For tight control over page level allocator and protection flags
3463e1ca7788SDave Young * use __vmalloc() instead.
3464a862f68aSMike Rapoport *
3465a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
3466e1ca7788SDave Young */
vzalloc(unsigned long size)3467e1ca7788SDave Young void *vzalloc(unsigned long size)
3468e1ca7788SDave Young {
34694d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
34704d39d728SChristoph Hellwig __builtin_return_address(0));
3471e1ca7788SDave Young }
3472e1ca7788SDave Young EXPORT_SYMBOL(vzalloc);
3473e1ca7788SDave Young
3474e1ca7788SDave Young /**
3475ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
347683342314SNick Piggin * @size: allocation size
3477ead04089SRolf Eike Beer *
3478ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace
3479ead04089SRolf Eike Beer * without leaking data.
3480a862f68aSMike Rapoport *
3481a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
348283342314SNick Piggin */
vmalloc_user(unsigned long size)348383342314SNick Piggin void *vmalloc_user(unsigned long size)
348483342314SNick Piggin {
3485bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3486bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3487bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE,
348800ef2d2fSDavid Rientjes __builtin_return_address(0));
348983342314SNick Piggin }
349083342314SNick Piggin EXPORT_SYMBOL(vmalloc_user);
349183342314SNick Piggin
349283342314SNick Piggin /**
3493930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node
3494930fc45aSChristoph Lameter * @size: allocation size
3495d44e0780SRandy Dunlap * @node: numa node
3496930fc45aSChristoph Lameter *
3497930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level
3498930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space.
3499930fc45aSChristoph Lameter *
3500c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags
3501930fc45aSChristoph Lameter * use __vmalloc() instead.
3502a862f68aSMike Rapoport *
3503a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
3504930fc45aSChristoph Lameter */
vmalloc_node(unsigned long size,int node)3505930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node)
3506930fc45aSChristoph Lameter {
3507f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, node,
3508f38fcb9cSChristoph Hellwig __builtin_return_address(0));
3509930fc45aSChristoph Lameter }
3510930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node);
3511930fc45aSChristoph Lameter
3512e1ca7788SDave Young /**
3513e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill
3514e1ca7788SDave Young * @size: allocation size
3515e1ca7788SDave Young * @node: numa node
3516e1ca7788SDave Young *
3517e1ca7788SDave Young * Allocate enough pages to cover @size from the page level
3518e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space.
3519e1ca7788SDave Young * The memory allocated is set to zero.
3520e1ca7788SDave Young *
3521a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
3522e1ca7788SDave Young */
vzalloc_node(unsigned long size,int node)3523e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node)
3524e1ca7788SDave Young {
35254d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
35264d39d728SChristoph Hellwig __builtin_return_address(0));
3527e1ca7788SDave Young }
3528e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node);
3529e1ca7788SDave Young
35300d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3531698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
35320d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3533698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
35340d08e0d3SAndi Kleen #else
3535698d0831SMichal Hocko /*
3536698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others
3537698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone.
3538698d0831SMichal Hocko */
353968d68ff6SZhiyuan Dai #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
35400d08e0d3SAndi Kleen #endif
35410d08e0d3SAndi Kleen
35421da177e4SLinus Torvalds /**
35431da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
35441da177e4SLinus Torvalds * @size: allocation size
35451da177e4SLinus Torvalds *
35461da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the
35471da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space.
3548a862f68aSMike Rapoport *
3549a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
35501da177e4SLinus Torvalds */
vmalloc_32(unsigned long size)35511da177e4SLinus Torvalds void *vmalloc_32(unsigned long size)
35521da177e4SLinus Torvalds {
3553f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3554f38fcb9cSChristoph Hellwig __builtin_return_address(0));
35551da177e4SLinus Torvalds }
35561da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32);
35571da177e4SLinus Torvalds
355883342314SNick Piggin /**
3559ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
356083342314SNick Piggin * @size: allocation size
3561ead04089SRolf Eike Beer *
3562ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be
3563ead04089SRolf Eike Beer * mapped to userspace without leaking data.
3564a862f68aSMike Rapoport *
3565a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
356683342314SNick Piggin */
vmalloc_32_user(unsigned long size)356783342314SNick Piggin void *vmalloc_32_user(unsigned long size)
356883342314SNick Piggin {
3569bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3570bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3571bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE,
35725a82ac71SRoman Penyaev __builtin_return_address(0));
357383342314SNick Piggin }
357483342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user);
357583342314SNick Piggin
3576d0107eb0SKAMEZAWA Hiroyuki /*
35774c91c07cSLorenzo Stoakes * Atomically zero bytes in the iterator.
35784c91c07cSLorenzo Stoakes *
35794c91c07cSLorenzo Stoakes * Returns the number of zeroed bytes.
3580d0107eb0SKAMEZAWA Hiroyuki */
zero_iter(struct iov_iter * iter,size_t count)35814c91c07cSLorenzo Stoakes static size_t zero_iter(struct iov_iter *iter, size_t count)
3582d0107eb0SKAMEZAWA Hiroyuki {
35834c91c07cSLorenzo Stoakes size_t remains = count;
3584d0107eb0SKAMEZAWA Hiroyuki
35854c91c07cSLorenzo Stoakes while (remains > 0) {
35864c91c07cSLorenzo Stoakes size_t num, copied;
35874c91c07cSLorenzo Stoakes
35880e4bc271SLu Hongfei num = min_t(size_t, remains, PAGE_SIZE);
35894c91c07cSLorenzo Stoakes copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
35904c91c07cSLorenzo Stoakes remains -= copied;
35914c91c07cSLorenzo Stoakes
35924c91c07cSLorenzo Stoakes if (copied < num)
35934c91c07cSLorenzo Stoakes break;
35944c91c07cSLorenzo Stoakes }
35954c91c07cSLorenzo Stoakes
35964c91c07cSLorenzo Stoakes return count - remains;
35974c91c07cSLorenzo Stoakes }
35984c91c07cSLorenzo Stoakes
35994c91c07cSLorenzo Stoakes /*
36004c91c07cSLorenzo Stoakes * Small helper routine: copy contents from addr into the iterator.
36014c91c07cSLorenzo Stoakes * If a page is not present, fill with zeroes.
36024c91c07cSLorenzo Stoakes *
36034c91c07cSLorenzo Stoakes * Returns the number of copied bytes.
36044c91c07cSLorenzo Stoakes */
aligned_vread_iter(struct iov_iter * iter,const char * addr,size_t count)36054c91c07cSLorenzo Stoakes static size_t aligned_vread_iter(struct iov_iter *iter,
36064c91c07cSLorenzo Stoakes const char *addr, size_t count)
36074c91c07cSLorenzo Stoakes {
36084c91c07cSLorenzo Stoakes size_t remains = count;
36094c91c07cSLorenzo Stoakes struct page *page;
36104c91c07cSLorenzo Stoakes
36114c91c07cSLorenzo Stoakes while (remains > 0) {
3612d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length;
36134c91c07cSLorenzo Stoakes size_t copied = 0;
3614d0107eb0SKAMEZAWA Hiroyuki
3615891c49abSAlexander Kuleshov offset = offset_in_page(addr);
3616d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset;
36174c91c07cSLorenzo Stoakes if (length > remains)
36184c91c07cSLorenzo Stoakes length = remains;
36194c91c07cSLorenzo Stoakes page = vmalloc_to_page(addr);
3620d0107eb0SKAMEZAWA Hiroyuki /*
36214c91c07cSLorenzo Stoakes * To access this _mapped_ area safely we would need a lock. But
36224c91c07cSLorenzo Stoakes * taking a lock here means adding overhead to vmalloc()/vfree()
36234c91c07cSLorenzo Stoakes * calls for the sake of this rarely used _debug_ interface.
36244c91c07cSLorenzo Stoakes * Instead, we use a local mapping via
36254c91c07cSLorenzo Stoakes * copy_page_to_iter_nofault() and accept a small overhead in
36264c91c07cSLorenzo Stoakes * this access function.
3627d0107eb0SKAMEZAWA Hiroyuki */
36284c91c07cSLorenzo Stoakes if (page)
36294c91c07cSLorenzo Stoakes copied = copy_page_to_iter_nofault(page, offset,
36304c91c07cSLorenzo Stoakes length, iter);
36314c91c07cSLorenzo Stoakes else
36324c91c07cSLorenzo Stoakes copied = zero_iter(iter, length);
3633d0107eb0SKAMEZAWA Hiroyuki
36344c91c07cSLorenzo Stoakes addr += copied;
36354c91c07cSLorenzo Stoakes remains -= copied;
36364c91c07cSLorenzo Stoakes
36374c91c07cSLorenzo Stoakes if (copied != length)
36384c91c07cSLorenzo Stoakes break;
3639d0107eb0SKAMEZAWA Hiroyuki }
3640d0107eb0SKAMEZAWA Hiroyuki
36414c91c07cSLorenzo Stoakes return count - remains;
36424c91c07cSLorenzo Stoakes }
36434c91c07cSLorenzo Stoakes
36444c91c07cSLorenzo Stoakes /*
36454c91c07cSLorenzo Stoakes * Read from a vm_map_ram region of memory.
36464c91c07cSLorenzo Stoakes *
36474c91c07cSLorenzo Stoakes * Returns the number of copied bytes.
36484c91c07cSLorenzo Stoakes */
vmap_ram_vread_iter(struct iov_iter * iter,const char * addr,size_t count,unsigned long flags)36494c91c07cSLorenzo Stoakes static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
36504c91c07cSLorenzo Stoakes size_t count, unsigned long flags)
365106c89946SBaoquan He {
365206c89946SBaoquan He char *start;
365306c89946SBaoquan He struct vmap_block *vb;
3654062eacf5SUladzislau Rezki (Sony) struct xarray *xa;
365506c89946SBaoquan He unsigned long offset;
36564c91c07cSLorenzo Stoakes unsigned int rs, re;
36574c91c07cSLorenzo Stoakes size_t remains, n;
365806c89946SBaoquan He
365906c89946SBaoquan He /*
366006c89946SBaoquan He * If the area was created directly by the vm_map_ram() interface,
366106c89946SBaoquan He * without being subdivided further and having its management
366206c89946SBaoquan He * delegated to vmap_block, handle it here.
366306c89946SBaoquan He */
36644c91c07cSLorenzo Stoakes if (!(flags & VMAP_BLOCK))
36654c91c07cSLorenzo Stoakes return aligned_vread_iter(iter, addr, count);
36664c91c07cSLorenzo Stoakes
36674c91c07cSLorenzo Stoakes remains = count;
366806c89946SBaoquan He
366906c89946SBaoquan He /*
367006c89946SBaoquan He * The area is split into regions and tracked with vmap_block; read out
367106c89946SBaoquan He * each region and zero-fill the holes between regions.
367206c89946SBaoquan He */
3673fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa((unsigned long) addr);
3674062eacf5SUladzislau Rezki (Sony) vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
367506c89946SBaoquan He if (!vb)
36764c91c07cSLorenzo Stoakes goto finished_zero;
367706c89946SBaoquan He
367806c89946SBaoquan He spin_lock(&vb->lock);
367906c89946SBaoquan He if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
368006c89946SBaoquan He spin_unlock(&vb->lock);
36814c91c07cSLorenzo Stoakes goto finished_zero;
36824c91c07cSLorenzo Stoakes }
36834c91c07cSLorenzo Stoakes
36844c91c07cSLorenzo Stoakes for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
36854c91c07cSLorenzo Stoakes size_t copied;
36864c91c07cSLorenzo Stoakes
36874c91c07cSLorenzo Stoakes if (remains == 0)
36884c91c07cSLorenzo Stoakes goto finished;
36894c91c07cSLorenzo Stoakes
36904c91c07cSLorenzo Stoakes start = vmap_block_vaddr(vb->va->va_start, rs);
36914c91c07cSLorenzo Stoakes
36924c91c07cSLorenzo Stoakes if (addr < start) {
36934c91c07cSLorenzo Stoakes size_t to_zero = min_t(size_t, start - addr, remains);
36944c91c07cSLorenzo Stoakes size_t zeroed = zero_iter(iter, to_zero);
36954c91c07cSLorenzo Stoakes
36964c91c07cSLorenzo Stoakes addr += zeroed;
36974c91c07cSLorenzo Stoakes remains -= zeroed;
36984c91c07cSLorenzo Stoakes
36994c91c07cSLorenzo Stoakes if (remains == 0 || zeroed != to_zero)
370006c89946SBaoquan He goto finished;
370106c89946SBaoquan He }
37024c91c07cSLorenzo Stoakes
370306c89946SBaoquan He /* it could start reading from the middle of a used region */
370406c89946SBaoquan He offset = offset_in_page(addr);
370506c89946SBaoquan He n = ((re - rs + 1) << PAGE_SHIFT) - offset;
37064c91c07cSLorenzo Stoakes if (n > remains)
37074c91c07cSLorenzo Stoakes n = remains;
370806c89946SBaoquan He
37094c91c07cSLorenzo Stoakes copied = aligned_vread_iter(iter, start + offset, n);
37104c91c07cSLorenzo Stoakes
37114c91c07cSLorenzo Stoakes addr += copied;
37124c91c07cSLorenzo Stoakes remains -= copied;
37134c91c07cSLorenzo Stoakes
37144c91c07cSLorenzo Stoakes if (copied != n)
37154c91c07cSLorenzo Stoakes goto finished;
371606c89946SBaoquan He }
37174c91c07cSLorenzo Stoakes
371806c89946SBaoquan He spin_unlock(&vb->lock);
371906c89946SBaoquan He
37204c91c07cSLorenzo Stoakes finished_zero:
372106c89946SBaoquan He /* zero-fill the remaining dirty or free regions */
37224c91c07cSLorenzo Stoakes return count - remains + zero_iter(iter, remains);
37234c91c07cSLorenzo Stoakes finished:
37244c91c07cSLorenzo Stoakes /* We couldn't copy/zero everything */
37254c91c07cSLorenzo Stoakes spin_unlock(&vb->lock);
37264c91c07cSLorenzo Stoakes return count - remains;
372706c89946SBaoquan He }
372806c89946SBaoquan He
3729d0107eb0SKAMEZAWA Hiroyuki /**
37304c91c07cSLorenzo Stoakes * vread_iter() - read vmalloc area in a safe way to an iterator.
37314c91c07cSLorenzo Stoakes * @iter: the iterator to which data should be written.
3732d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address.
3733d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read.
3734d0107eb0SKAMEZAWA Hiroyuki *
3735d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and
3736d0107eb0SKAMEZAWA Hiroyuki * copies data from that area to the given iterator. If the given memory
3737d0107eb0SKAMEZAWA Hiroyuki * range of [addr...addr+count) includes some valid address, data is
3738d0107eb0SKAMEZAWA Hiroyuki * copied to @iter. If there are memory holes, they'll be zero-filled.
3739d0107eb0SKAMEZAWA Hiroyuki * IOREMAP areas are treated as memory holes and no copy is done.
3740d0107eb0SKAMEZAWA Hiroyuki *
3741d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with an alive
3742a8e5202dSCong Wang * vm_struct area, returns 0.
3743d0107eb0SKAMEZAWA Hiroyuki *
3744d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vread_iter() is never necessary because the caller
3745d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy().
3746d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access the vmalloc area without
3747bbcd53c9SDavid Hildenbrand * any information, such as /proc/kcore.
3748a862f68aSMike Rapoport *
3749a862f68aSMike Rapoport * Return: number of bytes for which addr and @iter should be increased
3750a862f68aSMike Rapoport * (same number as @count) or %0 if [addr...addr+count) doesn't
3751a862f68aSMike Rapoport * include any intersection with a valid vmalloc area
3752d0107eb0SKAMEZAWA Hiroyuki */
vread_iter(struct iov_iter * iter,const char * addr,size_t count)37534c91c07cSLorenzo Stoakes long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
37541da177e4SLinus Torvalds {
3755e81ce85fSJoonsoo Kim struct vmap_area *va;
3756e81ce85fSJoonsoo Kim struct vm_struct *vm;
37574c91c07cSLorenzo Stoakes char *vaddr;
37584c91c07cSLorenzo Stoakes size_t n, size, flags, remains;
37591da177e4SLinus Torvalds
37604aff1dc4SAndrey Konovalov addr = kasan_reset_tag(addr);
37614aff1dc4SAndrey Konovalov
37621da177e4SLinus Torvalds /* Don't allow overflow */
37631da177e4SLinus Torvalds if ((unsigned long) addr + count < count)
37641da177e4SLinus Torvalds count = -(unsigned long) addr;
37651da177e4SLinus Torvalds
37664c91c07cSLorenzo Stoakes remains = count;
37674c91c07cSLorenzo Stoakes
3768e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock);
3769f181234aSChen Wandun va = find_vmap_area_exceed_addr((unsigned long)addr);
3770f608788cSSerapheim Dimitropoulos if (!va)
37714c91c07cSLorenzo Stoakes goto finished_zero;
3772f181234aSChen Wandun
3773f181234aSChen Wandun /* no intersects with alive vmap_area */
37744c91c07cSLorenzo Stoakes if ((unsigned long)addr + remains <= va->va_start)
37754c91c07cSLorenzo Stoakes goto finished_zero;
3776f181234aSChen Wandun
3777f608788cSSerapheim Dimitropoulos list_for_each_entry_from(va, &vmap_area_list, list) {
37784c91c07cSLorenzo Stoakes size_t copied;
37794c91c07cSLorenzo Stoakes
37804c91c07cSLorenzo Stoakes if (remains == 0)
37814c91c07cSLorenzo Stoakes goto finished;
3782e81ce85fSJoonsoo Kim
378306c89946SBaoquan He vm = va->vm;
378406c89946SBaoquan He flags = va->flags & VMAP_FLAGS_MASK;
378506c89946SBaoquan He /*
378606c89946SBaoquan He * VMAP_BLOCK indicates a sub-type of vm_map_ram area and needs
378706c89946SBaoquan He * to be set together with VMAP_RAM.
378806c89946SBaoquan He */
378906c89946SBaoquan He WARN_ON(flags == VMAP_BLOCK);
379006c89946SBaoquan He
379106c89946SBaoquan He if (!vm && !flags)
3792e81ce85fSJoonsoo Kim continue;
3793e81ce85fSJoonsoo Kim
379430a7a9b1SBaoquan He if (vm && (vm->flags & VM_UNINITIALIZED))
379530a7a9b1SBaoquan He continue;
37964c91c07cSLorenzo Stoakes
379730a7a9b1SBaoquan He /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
379830a7a9b1SBaoquan He smp_rmb();
379930a7a9b1SBaoquan He
380006c89946SBaoquan He vaddr = (char *) va->va_start;
380106c89946SBaoquan He size = vm ? get_vm_area_size(vm) : va_size(va);
380206c89946SBaoquan He
380306c89946SBaoquan He if (addr >= vaddr + size)
38041da177e4SLinus Torvalds continue;
38054c91c07cSLorenzo Stoakes
38064c91c07cSLorenzo Stoakes if (addr < vaddr) {
38074c91c07cSLorenzo Stoakes size_t to_zero = min_t(size_t, vaddr - addr, remains);
38084c91c07cSLorenzo Stoakes size_t zeroed = zero_iter(iter, to_zero);
38094c91c07cSLorenzo Stoakes
38104c91c07cSLorenzo Stoakes addr += zeroed;
38114c91c07cSLorenzo Stoakes remains -= zeroed;
38124c91c07cSLorenzo Stoakes
38134c91c07cSLorenzo Stoakes if (remains == 0 || zeroed != to_zero)
38141da177e4SLinus Torvalds goto finished;
38151da177e4SLinus Torvalds }
38164c91c07cSLorenzo Stoakes
381706c89946SBaoquan He n = vaddr + size - addr;
38184c91c07cSLorenzo Stoakes if (n > remains)
38194c91c07cSLorenzo Stoakes n = remains;
382006c89946SBaoquan He
382106c89946SBaoquan He if (flags & VMAP_RAM)
38224c91c07cSLorenzo Stoakes copied = vmap_ram_vread_iter(iter, addr, n, flags);
382306c89946SBaoquan He else if (!(vm->flags & VM_IOREMAP))
38244c91c07cSLorenzo Stoakes copied = aligned_vread_iter(iter, addr, n);
3825d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */
38264c91c07cSLorenzo Stoakes copied = zero_iter(iter, n);
38274c91c07cSLorenzo Stoakes
38284c91c07cSLorenzo Stoakes addr += copied;
38294c91c07cSLorenzo Stoakes remains -= copied;
38304c91c07cSLorenzo Stoakes
38314c91c07cSLorenzo Stoakes if (copied != n)
38324c91c07cSLorenzo Stoakes goto finished;
38331da177e4SLinus Torvalds }
38344c91c07cSLorenzo Stoakes
38354c91c07cSLorenzo Stoakes finished_zero:
38364c91c07cSLorenzo Stoakes spin_unlock(&vmap_area_lock);
38374c91c07cSLorenzo Stoakes /* zero-fill memory holes */
38384c91c07cSLorenzo Stoakes return count - remains + zero_iter(iter, remains);
38391da177e4SLinus Torvalds finished:
38404c91c07cSLorenzo Stoakes /* Nothing remains, or we couldn't copy/zero everything. */
3841e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock);
3842d0107eb0SKAMEZAWA Hiroyuki
38434c91c07cSLorenzo Stoakes return count - remains;
38441da177e4SLinus Torvalds }
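/*
 * A minimal usage sketch (assumed caller, in the style of /proc/kcore):
 * the reader hands vread_iter() an iterator over its destination buffer
 * and relies on holes being zero-filled:
 *
 *	size_t copied = vread_iter(iter, (const char *)start, len);
 *
 * where @iter was set up over the destination beforehand, e.g. with
 * import_ubuf() or iov_iter_init().
 */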
38451da177e4SLinus Torvalds
3846d0107eb0SKAMEZAWA Hiroyuki /**
3847e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace
3848e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover
3849e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at
3850e69e9d4aSHATAYAMA Daisuke * @kaddr: virtual address of vmalloc kernel memory
3851bdebd6a2SJann Horn * @pgoff: offset from @kaddr to start at
3852e69e9d4aSHATAYAMA Daisuke * @size: size of map area
3853e69e9d4aSHATAYAMA Daisuke *
3854e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure
3855e69e9d4aSHATAYAMA Daisuke *
3856e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area,
3857e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at
3858e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if that criteria isn't
3859e69e9d4aSHATAYAMA Daisuke * met.
3860e69e9d4aSHATAYAMA Daisuke *
3861e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c)
3862e69e9d4aSHATAYAMA Daisuke */
remap_vmalloc_range_partial(struct vm_area_struct * vma,unsigned long uaddr,void * kaddr,unsigned long pgoff,unsigned long size)3863e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3864bdebd6a2SJann Horn void *kaddr, unsigned long pgoff,
3865bdebd6a2SJann Horn unsigned long size)
3866e69e9d4aSHATAYAMA Daisuke {
3867e69e9d4aSHATAYAMA Daisuke struct vm_struct *area;
3868bdebd6a2SJann Horn unsigned long off;
3869bdebd6a2SJann Horn unsigned long end_index;
3870bdebd6a2SJann Horn
3871bdebd6a2SJann Horn if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3872bdebd6a2SJann Horn return -EINVAL;
3873e69e9d4aSHATAYAMA Daisuke
3874e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size);
3875e69e9d4aSHATAYAMA Daisuke
3876e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3877e69e9d4aSHATAYAMA Daisuke return -EINVAL;
3878e69e9d4aSHATAYAMA Daisuke
3879e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr);
3880e69e9d4aSHATAYAMA Daisuke if (!area)
3881e69e9d4aSHATAYAMA Daisuke return -EINVAL;
3882e69e9d4aSHATAYAMA Daisuke
3883fe9041c2SChristoph Hellwig if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3884e69e9d4aSHATAYAMA Daisuke return -EINVAL;
3885e69e9d4aSHATAYAMA Daisuke
3886bdebd6a2SJann Horn if (check_add_overflow(size, off, &end_index) ||
3887bdebd6a2SJann Horn end_index > get_vm_area_size(area))
3888e69e9d4aSHATAYAMA Daisuke return -EINVAL;
3889bdebd6a2SJann Horn kaddr += off;
3890e69e9d4aSHATAYAMA Daisuke
3891e69e9d4aSHATAYAMA Daisuke do {
3892e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr);
3893e69e9d4aSHATAYAMA Daisuke int ret;
3894e69e9d4aSHATAYAMA Daisuke
3895e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page);
3896e69e9d4aSHATAYAMA Daisuke if (ret)
3897e69e9d4aSHATAYAMA Daisuke return ret;
3898e69e9d4aSHATAYAMA Daisuke
3899e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE;
3900e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE;
3901e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE;
3902e69e9d4aSHATAYAMA Daisuke } while (size > 0);
3903e69e9d4aSHATAYAMA Daisuke
39041c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
3905e69e9d4aSHATAYAMA Daisuke
3906e69e9d4aSHATAYAMA Daisuke return 0;
3907e69e9d4aSHATAYAMA Daisuke }
3908e69e9d4aSHATAYAMA Daisuke
3909e69e9d4aSHATAYAMA Daisuke /**
391083342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace
391183342314SNick Piggin * @vma: vma to cover (map full range of vma)
391283342314SNick Piggin * @addr: vmalloc memory
391383342314SNick Piggin * @pgoff: number of pages into addr before first page to map
39147682486bSRandy Dunlap *
39157682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure
391683342314SNick Piggin *
391783342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and
391883342314SNick Piggin * that it is big enough to cover the vma. Will return failure if
391983342314SNick Piggin * that criteria isn't met.
392083342314SNick Piggin *
392172fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c)
392283342314SNick Piggin */
remap_vmalloc_range(struct vm_area_struct * vma,void * addr,unsigned long pgoff)392383342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
392483342314SNick Piggin unsigned long pgoff)
392583342314SNick Piggin {
3926e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start,
3927bdebd6a2SJann Horn addr, pgoff,
3928e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start);
392983342314SNick Piggin }
393083342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range);
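/*
 * A minimal usage sketch (hypothetical driver): the usual pairing is
 * vmalloc_user() for the buffer plus remap_vmalloc_range() from the
 * driver's ->mmap() handler:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->ubuf, 0);
 *	}
 *
 * where dev->ubuf was allocated with vmalloc_user(), which sets
 * VM_USERMAP and zeroes the memory so nothing leaks to userspace.
 */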
393183342314SNick Piggin
free_vm_area(struct vm_struct * area)39325f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area)
39335f4352fbSJeremy Fitzhardinge {
39345f4352fbSJeremy Fitzhardinge struct vm_struct *ret;
39355f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr);
39365f4352fbSJeremy Fitzhardinge BUG_ON(ret != area);
39375f4352fbSJeremy Fitzhardinge kfree(area);
39385f4352fbSJeremy Fitzhardinge }
39395f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area);
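/*
 * A minimal usage sketch: free_vm_area() tears down an area obtained
 * with get_vm_area()/get_vm_area_caller() that is not freed via vfree():
 *
 *	struct vm_struct *area = get_vm_area(4 * PAGE_SIZE, VM_IOREMAP);
 *	if (!area)
 *		return -ENOMEM;
 *	... establish a mapping at area->addr ...
 *	free_vm_area(area);
 */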
3940a10aa579SChristoph Lameter
39414f8b02b4STejun Heo #ifdef CONFIG_SMP
node_to_va(struct rb_node * n)3942ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n)
3943ca23e405STejun Heo {
39444583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node);
3945ca23e405STejun Heo }
3946ca23e405STejun Heo
3947ca23e405STejun Heo /**
394868ad4a33SUladzislau Rezki (Sony) * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
394968ad4a33SUladzislau Rezki (Sony) * @addr: target address
3950ca23e405STejun Heo *
395168ad4a33SUladzislau Rezki (Sony) * Returns: the vmap_area if it is found. If there is no such area,
395268ad4a33SUladzislau Rezki (Sony) * the first highest (in reverse order) vmap_area is returned,
395368ad4a33SUladzislau Rezki (Sony) * i.e. va->va_start < addr && va->va_end < addr, or NULL
395468ad4a33SUladzislau Rezki (Sony) * if there are no areas before @addr.
3955ca23e405STejun Heo */
395668ad4a33SUladzislau Rezki (Sony) static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)395768ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr)
3958ca23e405STejun Heo {
395968ad4a33SUladzislau Rezki (Sony) struct vmap_area *va, *tmp;
396068ad4a33SUladzislau Rezki (Sony) struct rb_node *n;
396168ad4a33SUladzislau Rezki (Sony)
396268ad4a33SUladzislau Rezki (Sony) n = free_vmap_area_root.rb_node;
396368ad4a33SUladzislau Rezki (Sony) va = NULL;
3964ca23e405STejun Heo
3965ca23e405STejun Heo while (n) {
396668ad4a33SUladzislau Rezki (Sony) tmp = rb_entry(n, struct vmap_area, rb_node);
396768ad4a33SUladzislau Rezki (Sony) if (tmp->va_start <= addr) {
396868ad4a33SUladzislau Rezki (Sony) va = tmp;
396968ad4a33SUladzislau Rezki (Sony) if (tmp->va_end >= addr)
3970ca23e405STejun Heo break;
3971ca23e405STejun Heo
397268ad4a33SUladzislau Rezki (Sony) n = n->rb_right;
3973ca23e405STejun Heo } else {
397468ad4a33SUladzislau Rezki (Sony) n = n->rb_left;
3975ca23e405STejun Heo }
397668ad4a33SUladzislau Rezki (Sony) }
397768ad4a33SUladzislau Rezki (Sony)
397868ad4a33SUladzislau Rezki (Sony) return va;
3979ca23e405STejun Heo }

/**
 * pvm_determine_end_from_reverse - find the highest aligned address
 * of free block below VMALLOC_END
 * @va:
 *   in - the VA we start the search from (reverse order);
 *   out - the VA with the highest aligned end address.
 * @align: alignment for required highest address
 *
 * Returns: determined end address within vmap_area
 */
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}

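/*
 * Worked example: with align = 0x10000 (64 KiB) and a free block whose
 * va_end is 0x12345000, the highest aligned end inside that block is
 * 0x12345000 & ~0xffff = 0x12340000; provided the block's va_start lies
 * below that, 0x12340000 is returned as the candidate end address.
 */
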
/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * The percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, the distance between two areas easily going
 * up to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans free blocks from the end looking
 * for a matching base.  While scanning, if any of the areas does not fit,
 * the base address is pulled down to fit that area.  Scanning is repeated
 * until all the areas fit, and then all necessary data structures are
 * inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end, orig_start, orig_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = area + 1; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			BUG_ON(start2 < end && start < end2);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&free_vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	va = pvm_find_va_enclose_addr(vmalloc_end);
	base = pvm_determine_end_from_reverse(&va, align) - end;

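	/*
	 * Walk the areas in reverse order against the candidate base: each
	 * iteration checks whether the current area fits; if not, the base
	 * is pulled down and term_area is reset to this area, so the scan
	 * only terminates once every area has fit against the same base.
	 */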
	while (true) {
		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * Fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If required width exceeds current VA block, move
		 * base downwards and then recheck.
		 */
		if (base + end > va->va_end) {
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = adjust_va_to_fit_type(&free_vmap_area_root,
					    &free_vmap_area_list,
					    va, start, size);
		if (WARN_ON_ONCE(unlikely(ret)))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		/* Allocated area. */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;
	}

	spin_unlock(&free_vmap_area_lock);

	/* populate the kasan shadow space */
	for (area = 0; area < nr_vms; area++) {
		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
			goto err_free_shadow;
	}

	/* insert all vm's */
	spin_lock(&vmap_area_lock);
	for (area = 0; area < nr_vms; area++) {
		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);

		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
					pcpu_get_vm_areas);
	}
	spin_unlock(&vmap_area_lock);

	/*
	 * Mark allocated areas as accessible.  Do it now as a best-effort
	 * approach, as they can be mapped outside of vmalloc code.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	for (area = 0; area < nr_vms; area++)
		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);

	kfree(vas);
	return vms;

recovery:
	/*
	 * Remove previously allocated areas.  There is no need
	 * to remove these areas from the busy tree, because they
	 * are inserted only on the final step and only when
	 * pcpu_get_vm_areas() succeeds.
	 */
	while (area--) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&free_vmap_area_lock);
	if (!purged) {
		reclaim_and_purge_vmap_areas();
		purged = true;

		/* Before "retry", check if we recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;

err_free_shadow:
	spin_lock(&free_vmap_area_lock);
	/*
	 * We release all the vmalloc shadows, even the ones for regions that
	 * hadn't been successfully added.  This relies on kasan_release_vmalloc
	 * being able to tolerate this case.
	 */
	for (area = 0; area < nr_vms; area++) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
		kfree(vms[area]);
	}
	spin_unlock(&free_vmap_area_lock);
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PRINTK
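/*
 * Best-effort debug helper: given an address, report the enclosing vmalloc
 * region (number of pages, start address and allocating caller).  It only
 * trylocks vmap_area_lock, so on contention it degrades gracefully and
 * returns false instead of risking a deadlock from debug/dump paths.
 */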
bool vmalloc_dump_obj(void *object)
{
	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
	const void *caller;
	struct vm_struct *vm;
	struct vmap_area *va;
	unsigned long addr;
	unsigned int nr_pages;

	if (!spin_trylock(&vmap_area_lock))
		return false;
	va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
	if (!va) {
		spin_unlock(&vmap_area_lock);
		return false;
	}

	vm = va->vm;
	if (!vm) {
		spin_unlock(&vmap_area_lock);
		return false;
	}
	addr = (unsigned long)vm->addr;
	caller = vm->caller;
	nr_pages = vm->nr_pages;
	spin_unlock(&vmap_area_lock);
	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		nr_pages, addr, caller);
	return true;
}
#endif
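
/*
 * Illustrative sketch only: vmalloc_dump_obj() is typically reached through
 * mem_dump_obj() (mm/util.c), which selects the right dumper for the given
 * address.  Assuming that entry point:
 *
 *	void *p = vmalloc(4 * PAGE_SIZE);
 *
 *	mem_dump_obj(p);	(appends something like " 4-page vmalloc
 *				 region starting at ... allocated at ...")
 *	vfree(p);
 */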

#ifdef CONFIG_PROC_FS
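/*
 * /proc/vmallocinfo is a seq_file walk over vmap_area_list.  The whole walk
 * runs with vmap_purge_lock and vmap_area_lock held (taken in s_start(),
 * dropped in s_stop()), so the list cannot change under the iterator; areas
 * still queued on purge_vmap_area_list are appended by show_purge_info().
 */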
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_purge_lock)
	__acquires(&vmap_area_lock)
{
	mutex_lock(&vmap_purge_lock);
	spin_lock(&vmap_area_lock);

	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
	__releases(&vmap_purge_lock)
{
	spin_unlock(&vmap_area_lock);
	mutex_unlock(&vmap_purge_lock);
}

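/*
 * Print a " N<node>=<pages>" column for every node backing pages of the
 * given mapping.  The per-node counters live in m->private (sized by
 * proc_vmalloc_init() below), and counting advances by the mapping's page
 * order so huge-page backed areas are attributed correctly.
 */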
static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;
		unsigned int step = 1U << vm_area_page_order(v);

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr += step)
			counters[page_to_nid(v->pages[nr])] += step;
		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static void show_purge_info(struct seq_file *m)
{
	struct vmap_area *va;

	spin_lock(&purge_vmap_area_lock);
	list_for_each_entry(va, &purge_vmap_area_list, list) {
		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
	}
	spin_unlock(&purge_vmap_area_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	if (!va->vm) {
		if (va->flags & VMAP_RAM)
			seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
				(void *)va->va_start, (void *)va->va_end,
				va->va_end - va->va_start);

		goto final;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_DMA_COHERENT)
		seq_puts(m, " dma-coherent");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');

	/*
	 * As a final step, dump "unpurged" areas.
	 */
final:
	if (list_is_last(&va->list, &vmap_area_list))
		show_purge_info(m);

	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);
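
/*
 * Each /proc/vmallocinfo line (root-readable, mode 0400) describes one
 * mapping as "<start>-<end> <size>", followed, when known, by the
 * allocating caller (%pS), "pages=", "phys=", flag tags such as
 * ioremap/vmalloc/vmap/user/dma-coherent/vpages, and per-node
 * " N<id>=<pages>" counts on NUMA builds; vm_map_ram and not-yet-purged
 * areas are reported separately.
 */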

#endif

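/*
 * Boot-time setup: create the vmap_area cache, initialize the per-CPU
 * vmap block queues and deferred-vfree lists, import any early vmlist
 * entries created before this point, and seed the free-space tree.  Only
 * once vmap_initialized is set at the end does the full vmalloc machinery
 * (vmap blocks, lazy purging, etc.) become usable.
 */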
void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	/*
	 * Create the cache for vmap_area objects.
	 */
	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, delayed_vfree_work);
		xa_init(&vbq->vmap_blocks);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (WARN_ON_ONCE(!va))
			continue;

		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	}

	/*
	 * Now we can initialize the free vmap space.
	 */
	vmap_init_free_space();
	vmap_initialized = true;
}