// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);
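
/*
 * Usage sketch (hypothetical helper, not kernel API): a typical consumer of
 * is_vmalloc_addr() is a "free either kind of buffer" routine, similar in
 * spirit to kvfree() in mm/util.c, which picks vfree() or kfree() based on
 * where the pointer lives.
 */
#if 0	/* illustrative only */
static void example_free_buf(const void *buf)
{
	if (is_vmalloc_addr(buf))
		vfree(buf);	/* buffer came from vmalloc()/vzalloc() */
	else
		kfree(buf);	/* buffer came from kmalloc() */
}
#endif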

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
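
/*
 * Usage sketch (hypothetical driver helper, not kernel API): drivers do not
 * call ioremap_page_range() directly; they use ioremap(), which allocates a
 * vmap area and then relies on this helper to install the mapping. The
 * register offset and value below are made-up example numbers.
 */
#if 0	/* illustrative only */
static int example_map_mmio(phys_addr_t regs_phys, size_t len)
{
	void __iomem *regs = ioremap(regs_phys, len);

	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* hypothetical "enable" register */
	iounmap(regs);
	return 0;
}
#endif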

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
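
/*
 * Usage sketch (hypothetical helper, not kernel API): the ordering demanded
 * by the vunmap_range_noflush() comment above, written out. A caller inside
 * mm/ that batches TLB flushes tears the page tables down first and issues
 * one flush_tlb_kernel_range() later, before the range can be reused.
 */
#if 0	/* illustrative only */
static void example_unmap_with_deferred_flush(unsigned long start, unsigned long end)
{
	flush_cache_vunmap(start, end);		/* before the PTEs are cleared */
	vunmap_range_noflush(start, end);
	/* ... possibly accumulate further ranges here ... */
	flush_tlb_kernel_range(start, end);	/* before the VA can be reused */
}
#endif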

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					__pa(page_address(pages[i])), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
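
/*
 * Usage sketch (hypothetical helpers, not kernel API): vmap_pages_range() is
 * the engine behind the public vmap() interface. A caller hands vmap() an
 * array of (possibly discontiguous) pages and gets back one virtually
 * contiguous kernel mapping, released again with vunmap().
 */
#if 0	/* illustrative only */
static void *example_map_pages(struct page **pages, unsigned int count)
{
	/* VM_MAP + PAGE_KERNEL end up in vmap_pages_range() via vmap(). */
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}

static void example_unmap_pages(void *addr)
{
	vunmap(addr);
}
#endif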

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
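
/*
 * Usage sketch (hypothetical helper, not kernel API): code that needs
 * struct page pointers for a vmalloc()'ed buffer, for example to build a
 * scatterlist, walks the buffer page by page with vmalloc_to_page().
 */
#if 0	/* illustrative only */
static void example_collect_pages(void *buf, unsigned int nr_pages,
				  struct page **pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		pages[i] = vmalloc_to_page(buf + i * PAGE_SIZE);
}
#endif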


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
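
/*
 * Simplified sketch (hypothetical, not the real lookup): how the
 * subtree_max_size augment is typically consumed. The search descends left
 * whenever the left subtree still holds a block that is large enough, which
 * yields the lowest-address fit; the real lookup further down in this file
 * additionally honours alignment and the vstart boundary.
 */
#if 0	/* illustrative only */
static struct vmap_area *example_lowest_fit(unsigned long size)
{
	struct rb_node *node = free_vmap_area_root.rb_node;

	while (node) {
		struct vmap_area *va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= size)
			node = node->rb_left;	/* a fit exists further left */
		else if (va_size(va) >= size)
			return va;		/* this node itself is the lowest fit */
		else
			node = node->rb_right;	/* only higher addresses can fit */
	}

	return NULL;
}
#endif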

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = vmap_area_root.rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the address of the parent node
 * and its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of a conflicting overlap range
 * have to be declined and are actually considered a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * I name it link, where the new va->rb_node will be attached to.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity check.
		 * Trigger the BUG() if there are sides (left/right)
		 * or full overlaps.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}

static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when removing the node and rotating.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end. Or
 * in case of newly inserting of VA to the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge a de-allocated chunk of VA memory with previous
 * and next free blocks. If coalescing is not done, a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Although this is
 * buggy behaviour, the system can stay alive and keep
 * going.
 */
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head, bool augment)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				__unlink_va(va, root, augment);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
*/ 11793c5c3cfbSDaniel Axtens va = sibling; 11803c5c3cfbSDaniel Axtens merged = true; 118168ad4a33SUladzislau Rezki (Sony) } 118268ad4a33SUladzislau Rezki (Sony) } 118368ad4a33SUladzislau Rezki (Sony) 118468ad4a33SUladzislau Rezki (Sony) insert: 11855dd78640SUladzislau Rezki (Sony) if (!merged) 11868eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, augment); 11873c5c3cfbSDaniel Axtens 118896e2db45SUladzislau Rezki (Sony) return va; 118996e2db45SUladzislau Rezki (Sony) } 119096e2db45SUladzislau Rezki (Sony) 119196e2db45SUladzislau Rezki (Sony) static __always_inline struct vmap_area * 11928eb510dbSUladzislau Rezki (Sony) merge_or_add_vmap_area(struct vmap_area *va, 11938eb510dbSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head) 11948eb510dbSUladzislau Rezki (Sony) { 11958eb510dbSUladzislau Rezki (Sony) return __merge_or_add_vmap_area(va, root, head, false); 11968eb510dbSUladzislau Rezki (Sony) } 11978eb510dbSUladzislau Rezki (Sony) 11988eb510dbSUladzislau Rezki (Sony) static __always_inline struct vmap_area * 119996e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(struct vmap_area *va, 120096e2db45SUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head) 120196e2db45SUladzislau Rezki (Sony) { 12028eb510dbSUladzislau Rezki (Sony) va = __merge_or_add_vmap_area(va, root, head, true); 120396e2db45SUladzislau Rezki (Sony) if (va) 12045dd78640SUladzislau Rezki (Sony) augment_tree_propagate_from(va); 120596e2db45SUladzislau Rezki (Sony) 12063c5c3cfbSDaniel Axtens return va; 120768ad4a33SUladzislau Rezki (Sony) } 120868ad4a33SUladzislau Rezki (Sony) 120968ad4a33SUladzislau Rezki (Sony) static __always_inline bool 121068ad4a33SUladzislau Rezki (Sony) is_within_this_va(struct vmap_area *va, unsigned long size, 121168ad4a33SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart) 121268ad4a33SUladzislau Rezki (Sony) { 121368ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr; 121468ad4a33SUladzislau Rezki (Sony) 121568ad4a33SUladzislau Rezki (Sony) if (va->va_start > vstart) 121668ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align); 121768ad4a33SUladzislau Rezki (Sony) else 121868ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align); 121968ad4a33SUladzislau Rezki (Sony) 122068ad4a33SUladzislau Rezki (Sony) /* Can be overflowed due to big size or alignment. */ 122168ad4a33SUladzislau Rezki (Sony) if (nva_start_addr + size < nva_start_addr || 122268ad4a33SUladzislau Rezki (Sony) nva_start_addr < vstart) 122368ad4a33SUladzislau Rezki (Sony) return false; 122468ad4a33SUladzislau Rezki (Sony) 122568ad4a33SUladzislau Rezki (Sony) return (nva_start_addr + size <= va->va_end); 122668ad4a33SUladzislau Rezki (Sony) } 122768ad4a33SUladzislau Rezki (Sony) 122868ad4a33SUladzislau Rezki (Sony) /* 122968ad4a33SUladzislau Rezki (Sony) * Find the first free block(lowest start address) in the tree, 123068ad4a33SUladzislau Rezki (Sony) * that will accomplish the request corresponding to passing 12319333fe98SUladzislau Rezki * parameters. Please note, with an alignment bigger than PAGE_SIZE, 12329333fe98SUladzislau Rezki * a search length is adjusted to account for worst case alignment 12339333fe98SUladzislau Rezki * overhead. 
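 *
 * A short illustrative example (not from the upstream comment): with
 * size = 2 * PAGE_SIZE and align = 4 * PAGE_SIZE the search uses
 *
 *	length = size + align - 1 = 6 * PAGE_SIZE - 1,
 *
 * so that any free block of at least that length is guaranteed to
 * contain a suitably aligned sub-range of "size" bytes.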
123468ad4a33SUladzislau Rezki (Sony) */ 123568ad4a33SUladzislau Rezki (Sony) static __always_inline struct vmap_area * 1236f9863be4SUladzislau Rezki (Sony) find_vmap_lowest_match(struct rb_root *root, unsigned long size, 1237f9863be4SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart, bool adjust_search_size) 123868ad4a33SUladzislau Rezki (Sony) { 123968ad4a33SUladzislau Rezki (Sony) struct vmap_area *va; 124068ad4a33SUladzislau Rezki (Sony) struct rb_node *node; 12419333fe98SUladzislau Rezki unsigned long length; 124268ad4a33SUladzislau Rezki (Sony) 124368ad4a33SUladzislau Rezki (Sony) /* Start from the root. */ 1244f9863be4SUladzislau Rezki (Sony) node = root->rb_node; 124568ad4a33SUladzislau Rezki (Sony) 12469333fe98SUladzislau Rezki /* Adjust the search size for alignment overhead. */ 12479333fe98SUladzislau Rezki length = adjust_search_size ? size + align - 1 : size; 12489333fe98SUladzislau Rezki 124968ad4a33SUladzislau Rezki (Sony) while (node) { 125068ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node); 125168ad4a33SUladzislau Rezki (Sony) 12529333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_left) >= length && 125368ad4a33SUladzislau Rezki (Sony) vstart < va->va_start) { 125468ad4a33SUladzislau Rezki (Sony) node = node->rb_left; 125568ad4a33SUladzislau Rezki (Sony) } else { 125668ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart)) 125768ad4a33SUladzislau Rezki (Sony) return va; 125868ad4a33SUladzislau Rezki (Sony) 125968ad4a33SUladzislau Rezki (Sony) /* 126068ad4a33SUladzislau Rezki (Sony) * Does not make sense to go deeper towards the right 126168ad4a33SUladzislau Rezki (Sony) * sub-tree if it does not have a free block that is 12629333fe98SUladzislau Rezki * equal or bigger to the requested search length. 126368ad4a33SUladzislau Rezki (Sony) */ 12649333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_right) >= length) { 126568ad4a33SUladzislau Rezki (Sony) node = node->rb_right; 126668ad4a33SUladzislau Rezki (Sony) continue; 126768ad4a33SUladzislau Rezki (Sony) } 126868ad4a33SUladzislau Rezki (Sony) 126968ad4a33SUladzislau Rezki (Sony) /* 12703806b041SAndrew Morton * OK. We roll back and find the first right sub-tree, 127168ad4a33SUladzislau Rezki (Sony) * that will satisfy the search criteria. It can happen 12729f531973SUladzislau Rezki (Sony) * due to "vstart" restriction or an alignment overhead 12739f531973SUladzislau Rezki (Sony) * that is bigger then PAGE_SIZE. 127468ad4a33SUladzislau Rezki (Sony) */ 127568ad4a33SUladzislau Rezki (Sony) while ((node = rb_parent(node))) { 127668ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node); 127768ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart)) 127868ad4a33SUladzislau Rezki (Sony) return va; 127968ad4a33SUladzislau Rezki (Sony) 12809333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_right) >= length && 128168ad4a33SUladzislau Rezki (Sony) vstart <= va->va_start) { 12829f531973SUladzislau Rezki (Sony) /* 12839f531973SUladzislau Rezki (Sony) * Shift the vstart forward. Please note, we update it with 12849f531973SUladzislau Rezki (Sony) * parent's start address adding "1" because we do not want 12859f531973SUladzislau Rezki (Sony) * to enter same sub-tree after it has already been checked 12869f531973SUladzislau Rezki (Sony) * and no suitable free block found there. 
12879f531973SUladzislau Rezki (Sony) */ 12889f531973SUladzislau Rezki (Sony) vstart = va->va_start + 1; 128968ad4a33SUladzislau Rezki (Sony) node = node->rb_right; 129068ad4a33SUladzislau Rezki (Sony) break; 129168ad4a33SUladzislau Rezki (Sony) } 129268ad4a33SUladzislau Rezki (Sony) } 129368ad4a33SUladzislau Rezki (Sony) } 129468ad4a33SUladzislau Rezki (Sony) } 129568ad4a33SUladzislau Rezki (Sony) 129668ad4a33SUladzislau Rezki (Sony) return NULL; 129768ad4a33SUladzislau Rezki (Sony) } 129868ad4a33SUladzislau Rezki (Sony) 1299a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1300a6cf4e0fSUladzislau Rezki (Sony) #include <linux/random.h> 1301a6cf4e0fSUladzislau Rezki (Sony) 1302a6cf4e0fSUladzislau Rezki (Sony) static struct vmap_area * 1303a6cf4e0fSUladzislau Rezki (Sony) find_vmap_lowest_linear_match(unsigned long size, 1304a6cf4e0fSUladzislau Rezki (Sony) unsigned long align, unsigned long vstart) 1305a6cf4e0fSUladzislau Rezki (Sony) { 1306a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va; 1307a6cf4e0fSUladzislau Rezki (Sony) 1308a6cf4e0fSUladzislau Rezki (Sony) list_for_each_entry(va, &free_vmap_area_list, list) { 1309a6cf4e0fSUladzislau Rezki (Sony) if (!is_within_this_va(va, size, align, vstart)) 1310a6cf4e0fSUladzislau Rezki (Sony) continue; 1311a6cf4e0fSUladzislau Rezki (Sony) 1312a6cf4e0fSUladzislau Rezki (Sony) return va; 1313a6cf4e0fSUladzislau Rezki (Sony) } 1314a6cf4e0fSUladzislau Rezki (Sony) 1315a6cf4e0fSUladzislau Rezki (Sony) return NULL; 1316a6cf4e0fSUladzislau Rezki (Sony) } 1317a6cf4e0fSUladzislau Rezki (Sony) 1318a6cf4e0fSUladzislau Rezki (Sony) static void 1319066fed59SUladzislau Rezki (Sony) find_vmap_lowest_match_check(unsigned long size, unsigned long align) 1320a6cf4e0fSUladzislau Rezki (Sony) { 1321a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va_1, *va_2; 1322a6cf4e0fSUladzislau Rezki (Sony) unsigned long vstart; 1323a6cf4e0fSUladzislau Rezki (Sony) unsigned int rnd; 1324a6cf4e0fSUladzislau Rezki (Sony) 1325a6cf4e0fSUladzislau Rezki (Sony) get_random_bytes(&rnd, sizeof(rnd)); 1326a6cf4e0fSUladzislau Rezki (Sony) vstart = VMALLOC_START + rnd; 1327a6cf4e0fSUladzislau Rezki (Sony) 13289333fe98SUladzislau Rezki va_1 = find_vmap_lowest_match(size, align, vstart, false); 1329066fed59SUladzislau Rezki (Sony) va_2 = find_vmap_lowest_linear_match(size, align, vstart); 1330a6cf4e0fSUladzislau Rezki (Sony) 1331a6cf4e0fSUladzislau Rezki (Sony) if (va_1 != va_2) 1332a6cf4e0fSUladzislau Rezki (Sony) pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", 1333a6cf4e0fSUladzislau Rezki (Sony) va_1, va_2, vstart); 1334a6cf4e0fSUladzislau Rezki (Sony) } 1335a6cf4e0fSUladzislau Rezki (Sony) #endif 1336a6cf4e0fSUladzislau Rezki (Sony) 133768ad4a33SUladzislau Rezki (Sony) enum fit_type { 133868ad4a33SUladzislau Rezki (Sony) NOTHING_FIT = 0, 133968ad4a33SUladzislau Rezki (Sony) FL_FIT_TYPE = 1, /* full fit */ 134068ad4a33SUladzislau Rezki (Sony) LE_FIT_TYPE = 2, /* left edge fit */ 134168ad4a33SUladzislau Rezki (Sony) RE_FIT_TYPE = 3, /* right edge fit */ 134268ad4a33SUladzislau Rezki (Sony) NE_FIT_TYPE = 4 /* no edge fit */ 134368ad4a33SUladzislau Rezki (Sony) }; 134468ad4a33SUladzislau Rezki (Sony) 134568ad4a33SUladzislau Rezki (Sony) static __always_inline enum fit_type 134668ad4a33SUladzislau Rezki (Sony) classify_va_fit_type(struct vmap_area *va, 134768ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr, unsigned long size) 134868ad4a33SUladzislau Rezki (Sony) { 134968ad4a33SUladzislau Rezki (Sony) enum fit_type type; 135068ad4a33SUladzislau Rezki 
(Sony) 135168ad4a33SUladzislau Rezki (Sony) /* Check if it is within VA. */ 135268ad4a33SUladzislau Rezki (Sony) if (nva_start_addr < va->va_start || 135368ad4a33SUladzislau Rezki (Sony) nva_start_addr + size > va->va_end) 135468ad4a33SUladzislau Rezki (Sony) return NOTHING_FIT; 135568ad4a33SUladzislau Rezki (Sony) 135668ad4a33SUladzislau Rezki (Sony) /* Now classify. */ 135768ad4a33SUladzislau Rezki (Sony) if (va->va_start == nva_start_addr) { 135868ad4a33SUladzislau Rezki (Sony) if (va->va_end == nva_start_addr + size) 135968ad4a33SUladzislau Rezki (Sony) type = FL_FIT_TYPE; 136068ad4a33SUladzislau Rezki (Sony) else 136168ad4a33SUladzislau Rezki (Sony) type = LE_FIT_TYPE; 136268ad4a33SUladzislau Rezki (Sony) } else if (va->va_end == nva_start_addr + size) { 136368ad4a33SUladzislau Rezki (Sony) type = RE_FIT_TYPE; 136468ad4a33SUladzislau Rezki (Sony) } else { 136568ad4a33SUladzislau Rezki (Sony) type = NE_FIT_TYPE; 136668ad4a33SUladzislau Rezki (Sony) } 136768ad4a33SUladzislau Rezki (Sony) 136868ad4a33SUladzislau Rezki (Sony) return type; 136968ad4a33SUladzislau Rezki (Sony) } 137068ad4a33SUladzislau Rezki (Sony) 137168ad4a33SUladzislau Rezki (Sony) static __always_inline int 1372f9863be4SUladzislau Rezki (Sony) adjust_va_to_fit_type(struct rb_root *root, struct list_head *head, 1373f9863be4SUladzislau Rezki (Sony) struct vmap_area *va, unsigned long nva_start_addr, 1374f9863be4SUladzislau Rezki (Sony) unsigned long size) 137568ad4a33SUladzislau Rezki (Sony) { 13762c929233SArnd Bergmann struct vmap_area *lva = NULL; 13771b23ff80SBaoquan He enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); 137868ad4a33SUladzislau Rezki (Sony) 137968ad4a33SUladzislau Rezki (Sony) if (type == FL_FIT_TYPE) { 138068ad4a33SUladzislau Rezki (Sony) /* 138168ad4a33SUladzislau Rezki (Sony) * No need to split VA, it fully fits. 138268ad4a33SUladzislau Rezki (Sony) * 138368ad4a33SUladzislau Rezki (Sony) * | | 138468ad4a33SUladzislau Rezki (Sony) * V NVA V 138568ad4a33SUladzislau Rezki (Sony) * |---------------| 138668ad4a33SUladzislau Rezki (Sony) */ 1387f9863be4SUladzislau Rezki (Sony) unlink_va_augment(va, root); 138868ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 138968ad4a33SUladzislau Rezki (Sony) } else if (type == LE_FIT_TYPE) { 139068ad4a33SUladzislau Rezki (Sony) /* 139168ad4a33SUladzislau Rezki (Sony) * Split left edge of fit VA. 139268ad4a33SUladzislau Rezki (Sony) * 139368ad4a33SUladzislau Rezki (Sony) * | | 139468ad4a33SUladzislau Rezki (Sony) * V NVA V R 139568ad4a33SUladzislau Rezki (Sony) * |-------|-------| 139668ad4a33SUladzislau Rezki (Sony) */ 139768ad4a33SUladzislau Rezki (Sony) va->va_start += size; 139868ad4a33SUladzislau Rezki (Sony) } else if (type == RE_FIT_TYPE) { 139968ad4a33SUladzislau Rezki (Sony) /* 140068ad4a33SUladzislau Rezki (Sony) * Split right edge of fit VA. 140168ad4a33SUladzislau Rezki (Sony) * 140268ad4a33SUladzislau Rezki (Sony) * | | 140368ad4a33SUladzislau Rezki (Sony) * L V NVA V 140468ad4a33SUladzislau Rezki (Sony) * |-------|-------| 140568ad4a33SUladzislau Rezki (Sony) */ 140668ad4a33SUladzislau Rezki (Sony) va->va_end = nva_start_addr; 140768ad4a33SUladzislau Rezki (Sony) } else if (type == NE_FIT_TYPE) { 140868ad4a33SUladzislau Rezki (Sony) /* 140968ad4a33SUladzislau Rezki (Sony) * Split no edge of fit VA. 
141068ad4a33SUladzislau Rezki (Sony) * 141168ad4a33SUladzislau Rezki (Sony) * | | 141268ad4a33SUladzislau Rezki (Sony) * L V NVA V R 141368ad4a33SUladzislau Rezki (Sony) * |---|-------|---| 141468ad4a33SUladzislau Rezki (Sony) */ 141582dd23e8SUladzislau Rezki (Sony) lva = __this_cpu_xchg(ne_fit_preload_node, NULL); 141682dd23e8SUladzislau Rezki (Sony) if (unlikely(!lva)) { 141782dd23e8SUladzislau Rezki (Sony) /* 141882dd23e8SUladzislau Rezki (Sony) * For percpu allocator we do not do any pre-allocation 141982dd23e8SUladzislau Rezki (Sony) * and leave it as it is. The reason is it most likely 142082dd23e8SUladzislau Rezki (Sony) * never ends up with NE_FIT_TYPE splitting. In case of 142182dd23e8SUladzislau Rezki (Sony) * percpu allocations offsets and sizes are aligned to 142282dd23e8SUladzislau Rezki (Sony) * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE 142382dd23e8SUladzislau Rezki (Sony) * are its main fitting cases. 142482dd23e8SUladzislau Rezki (Sony) * 142582dd23e8SUladzislau Rezki (Sony) * There are a few exceptions though, as an example it is 142682dd23e8SUladzislau Rezki (Sony) * a first allocation (early boot up) when we have "one" 142782dd23e8SUladzislau Rezki (Sony) * big free space that has to be split. 1428060650a2SUladzislau Rezki (Sony) * 1429060650a2SUladzislau Rezki (Sony) * Also we can hit this path in case of regular "vmap" 1430060650a2SUladzislau Rezki (Sony) * allocations, if "this" current CPU was not preloaded. 1431060650a2SUladzislau Rezki (Sony) * See the comment in alloc_vmap_area() why. If so, then 1432060650a2SUladzislau Rezki (Sony) * GFP_NOWAIT is used instead to get an extra object for 1433060650a2SUladzislau Rezki (Sony) * split purpose. That is rare and most time does not 1434060650a2SUladzislau Rezki (Sony) * occur. 1435060650a2SUladzislau Rezki (Sony) * 1436060650a2SUladzislau Rezki (Sony) * What happens if an allocation gets failed. Basically, 1437060650a2SUladzislau Rezki (Sony) * an "overflow" path is triggered to purge lazily freed 1438060650a2SUladzislau Rezki (Sony) * areas to free some memory, then, the "retry" path is 1439060650a2SUladzislau Rezki (Sony) * triggered to repeat one more time. See more details 1440060650a2SUladzislau Rezki (Sony) * in alloc_vmap_area() function. 144182dd23e8SUladzislau Rezki (Sony) */ 144268ad4a33SUladzislau Rezki (Sony) lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); 144382dd23e8SUladzislau Rezki (Sony) if (!lva) 144468ad4a33SUladzislau Rezki (Sony) return -1; 144582dd23e8SUladzislau Rezki (Sony) } 144668ad4a33SUladzislau Rezki (Sony) 144768ad4a33SUladzislau Rezki (Sony) /* 144868ad4a33SUladzislau Rezki (Sony) * Build the remainder. 144968ad4a33SUladzislau Rezki (Sony) */ 145068ad4a33SUladzislau Rezki (Sony) lva->va_start = va->va_start; 145168ad4a33SUladzislau Rezki (Sony) lva->va_end = nva_start_addr; 145268ad4a33SUladzislau Rezki (Sony) 145368ad4a33SUladzislau Rezki (Sony) /* 145468ad4a33SUladzislau Rezki (Sony) * Shrink this VA to remaining size. 
145568ad4a33SUladzislau Rezki (Sony) */ 145668ad4a33SUladzislau Rezki (Sony) va->va_start = nva_start_addr + size; 145768ad4a33SUladzislau Rezki (Sony) } else { 145868ad4a33SUladzislau Rezki (Sony) return -1; 145968ad4a33SUladzislau Rezki (Sony) } 146068ad4a33SUladzislau Rezki (Sony) 146168ad4a33SUladzislau Rezki (Sony) if (type != FL_FIT_TYPE) { 146268ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va); 146368ad4a33SUladzislau Rezki (Sony) 14642c929233SArnd Bergmann if (lva) /* type == NE_FIT_TYPE */ 1465f9863be4SUladzislau Rezki (Sony) insert_vmap_area_augment(lva, &va->rb_node, root, head); 146668ad4a33SUladzislau Rezki (Sony) } 146768ad4a33SUladzislau Rezki (Sony) 146868ad4a33SUladzislau Rezki (Sony) return 0; 146968ad4a33SUladzislau Rezki (Sony) } 147068ad4a33SUladzislau Rezki (Sony) 147168ad4a33SUladzislau Rezki (Sony) /* 147268ad4a33SUladzislau Rezki (Sony) * Returns a start address of the newly allocated area, if success. 147368ad4a33SUladzislau Rezki (Sony) * Otherwise a vend is returned that indicates failure. 147468ad4a33SUladzislau Rezki (Sony) */ 147568ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long 1476f9863be4SUladzislau Rezki (Sony) __alloc_vmap_area(struct rb_root *root, struct list_head *head, 1477f9863be4SUladzislau Rezki (Sony) unsigned long size, unsigned long align, 1478cacca6baSUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend) 147968ad4a33SUladzislau Rezki (Sony) { 14809333fe98SUladzislau Rezki bool adjust_search_size = true; 148168ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr; 148268ad4a33SUladzislau Rezki (Sony) struct vmap_area *va; 148368ad4a33SUladzislau Rezki (Sony) int ret; 148468ad4a33SUladzislau Rezki (Sony) 14859333fe98SUladzislau Rezki /* 14869333fe98SUladzislau Rezki * Do not adjust when: 14879333fe98SUladzislau Rezki * a) align <= PAGE_SIZE, because it does not make any sense. 14889333fe98SUladzislau Rezki * All blocks(their start addresses) are at least PAGE_SIZE 14899333fe98SUladzislau Rezki * aligned anyway; 14909333fe98SUladzislau Rezki * b) a short range where a requested size corresponds to exactly 14919333fe98SUladzislau Rezki * specified [vstart:vend] interval and an alignment > PAGE_SIZE. 14929333fe98SUladzislau Rezki * With adjusted search length an allocation would not succeed. 14939333fe98SUladzislau Rezki */ 14949333fe98SUladzislau Rezki if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size)) 14959333fe98SUladzislau Rezki adjust_search_size = false; 14969333fe98SUladzislau Rezki 1497f9863be4SUladzislau Rezki (Sony) va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); 149868ad4a33SUladzislau Rezki (Sony) if (unlikely(!va)) 149968ad4a33SUladzislau Rezki (Sony) return vend; 150068ad4a33SUladzislau Rezki (Sony) 150168ad4a33SUladzislau Rezki (Sony) if (va->va_start > vstart) 150268ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align); 150368ad4a33SUladzislau Rezki (Sony) else 150468ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align); 150568ad4a33SUladzislau Rezki (Sony) 150668ad4a33SUladzislau Rezki (Sony) /* Check the "vend" restriction. */ 150768ad4a33SUladzislau Rezki (Sony) if (nva_start_addr + size > vend) 150868ad4a33SUladzislau Rezki (Sony) return vend; 150968ad4a33SUladzislau Rezki (Sony) 151068ad4a33SUladzislau Rezki (Sony) /* Update the free vmap_area. 
*/ 1511f9863be4SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size); 15121b23ff80SBaoquan He if (WARN_ON_ONCE(ret)) 151368ad4a33SUladzislau Rezki (Sony) return vend; 151468ad4a33SUladzislau Rezki (Sony) 1515a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1516066fed59SUladzislau Rezki (Sony) find_vmap_lowest_match_check(size, align); 1517a6cf4e0fSUladzislau Rezki (Sony) #endif 1518a6cf4e0fSUladzislau Rezki (Sony) 151968ad4a33SUladzislau Rezki (Sony) return nva_start_addr; 152068ad4a33SUladzislau Rezki (Sony) } 15214da56b99SChris Wilson 1522db64fe02SNick Piggin /* 1523d98c9e83SAndrey Ryabinin * Free a region of KVA allocated by alloc_vmap_area 1524d98c9e83SAndrey Ryabinin */ 1525d98c9e83SAndrey Ryabinin static void free_vmap_area(struct vmap_area *va) 1526d98c9e83SAndrey Ryabinin { 1527d98c9e83SAndrey Ryabinin /* 1528d98c9e83SAndrey Ryabinin * Remove from the busy tree/list. 1529d98c9e83SAndrey Ryabinin */ 1530d98c9e83SAndrey Ryabinin spin_lock(&vmap_area_lock); 1531d98c9e83SAndrey Ryabinin unlink_va(va, &vmap_area_root); 1532d98c9e83SAndrey Ryabinin spin_unlock(&vmap_area_lock); 1533d98c9e83SAndrey Ryabinin 1534d98c9e83SAndrey Ryabinin /* 1535d98c9e83SAndrey Ryabinin * Insert/Merge it back to the free tree/list. 1536d98c9e83SAndrey Ryabinin */ 1537d98c9e83SAndrey Ryabinin spin_lock(&free_vmap_area_lock); 153896e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); 1539d98c9e83SAndrey Ryabinin spin_unlock(&free_vmap_area_lock); 1540d98c9e83SAndrey Ryabinin } 1541d98c9e83SAndrey Ryabinin 1542187f8cc4SUladzislau Rezki (Sony) static inline void 1543187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) 1544187f8cc4SUladzislau Rezki (Sony) { 1545187f8cc4SUladzislau Rezki (Sony) struct vmap_area *va = NULL; 1546187f8cc4SUladzislau Rezki (Sony) 1547187f8cc4SUladzislau Rezki (Sony) /* 1548187f8cc4SUladzislau Rezki (Sony) * Preload this CPU with one extra vmap_area object. It is used 1549187f8cc4SUladzislau Rezki (Sony) * when fit type of free area is NE_FIT_TYPE. It guarantees that 1550187f8cc4SUladzislau Rezki (Sony) * a CPU that does an allocation is preloaded. 1551187f8cc4SUladzislau Rezki (Sony) * 1552187f8cc4SUladzislau Rezki (Sony) * We do it in non-atomic context, thus it allows us to use more 1553187f8cc4SUladzislau Rezki (Sony) * permissive allocation masks to be more stable under low memory 1554187f8cc4SUladzislau Rezki (Sony) * condition and high memory pressure. 1555187f8cc4SUladzislau Rezki (Sony) */ 1556187f8cc4SUladzislau Rezki (Sony) if (!this_cpu_read(ne_fit_preload_node)) 1557187f8cc4SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1558187f8cc4SUladzislau Rezki (Sony) 1559187f8cc4SUladzislau Rezki (Sony) spin_lock(lock); 1560187f8cc4SUladzislau Rezki (Sony) 1561187f8cc4SUladzislau Rezki (Sony) if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va)) 1562187f8cc4SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 1563187f8cc4SUladzislau Rezki (Sony) } 1564187f8cc4SUladzislau Rezki (Sony) 1565d98c9e83SAndrey Ryabinin /* 1566db64fe02SNick Piggin * Allocate a region of KVA of the specified size and alignment, within the 1567db64fe02SNick Piggin * vstart and vend. 
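 *
 * A hypothetical caller (illustrative sketch only, not upstream code)
 * would use it roughly as follows:
 *
 *	struct vmap_area *va;
 *
 *	va = alloc_vmap_area(PAGE_SIZE, PAGE_SIZE,
 *			     VMALLOC_START, VMALLOC_END,
 *			     NUMA_NO_NODE, GFP_KERNEL);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *
 * since the function returns an ERR_PTR()-encoded errno on failure.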
1568db64fe02SNick Piggin */ 1569db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size, 1570db64fe02SNick Piggin unsigned long align, 1571db64fe02SNick Piggin unsigned long vstart, unsigned long vend, 1572db64fe02SNick Piggin int node, gfp_t gfp_mask) 1573db64fe02SNick Piggin { 1574187f8cc4SUladzislau Rezki (Sony) struct vmap_area *va; 157512e376a6SUladzislau Rezki (Sony) unsigned long freed; 15761da177e4SLinus Torvalds unsigned long addr; 1577db64fe02SNick Piggin int purged = 0; 1578d98c9e83SAndrey Ryabinin int ret; 1579db64fe02SNick Piggin 15807766970cSNick Piggin BUG_ON(!size); 1581891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 158289699605SNick Piggin BUG_ON(!is_power_of_2(align)); 1583db64fe02SNick Piggin 158468ad4a33SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized)) 158568ad4a33SUladzislau Rezki (Sony) return ERR_PTR(-EBUSY); 158668ad4a33SUladzislau Rezki (Sony) 15875803ed29SChristoph Hellwig might_sleep(); 1588f07116d7SUladzislau Rezki (Sony) gfp_mask = gfp_mask & GFP_RECLAIM_MASK; 15894da56b99SChris Wilson 1590f07116d7SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1591db64fe02SNick Piggin if (unlikely(!va)) 1592db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 1593db64fe02SNick Piggin 15947f88f88fSCatalin Marinas /* 15957f88f88fSCatalin Marinas * Only scan the relevant parts containing pointers to other objects 15967f88f88fSCatalin Marinas * to avoid false negatives. 15977f88f88fSCatalin Marinas */ 1598f07116d7SUladzislau Rezki (Sony) kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); 15997f88f88fSCatalin Marinas 1600db64fe02SNick Piggin retry: 1601187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); 1602f9863be4SUladzislau Rezki (Sony) addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, 1603f9863be4SUladzislau Rezki (Sony) size, align, vstart, vend); 1604187f8cc4SUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 160568ad4a33SUladzislau Rezki (Sony) 160689699605SNick Piggin /* 160768ad4a33SUladzislau Rezki (Sony) * If an allocation fails, the "vend" address is 160868ad4a33SUladzislau Rezki (Sony) * returned. Therefore trigger the overflow path. 
160989699605SNick Piggin */ 161068ad4a33SUladzislau Rezki (Sony) if (unlikely(addr == vend)) 161189699605SNick Piggin goto overflow; 161289699605SNick Piggin 161389699605SNick Piggin va->va_start = addr; 161489699605SNick Piggin va->va_end = addr + size; 1615688fcbfcSPengfei Li va->vm = NULL; 161668ad4a33SUladzislau Rezki (Sony) 1617e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 1618e36176beSUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 161989699605SNick Piggin spin_unlock(&vmap_area_lock); 162089699605SNick Piggin 162161e16557SWang Xiaoqiang BUG_ON(!IS_ALIGNED(va->va_start, align)); 162289699605SNick Piggin BUG_ON(va->va_start < vstart); 162389699605SNick Piggin BUG_ON(va->va_end > vend); 162489699605SNick Piggin 1625d98c9e83SAndrey Ryabinin ret = kasan_populate_vmalloc(addr, size); 1626d98c9e83SAndrey Ryabinin if (ret) { 1627d98c9e83SAndrey Ryabinin free_vmap_area(va); 1628d98c9e83SAndrey Ryabinin return ERR_PTR(ret); 1629d98c9e83SAndrey Ryabinin } 1630d98c9e83SAndrey Ryabinin 163189699605SNick Piggin return va; 163289699605SNick Piggin 16337766970cSNick Piggin overflow: 1634db64fe02SNick Piggin if (!purged) { 1635db64fe02SNick Piggin purge_vmap_area_lazy(); 1636db64fe02SNick Piggin purged = 1; 1637db64fe02SNick Piggin goto retry; 1638db64fe02SNick Piggin } 16394da56b99SChris Wilson 164012e376a6SUladzislau Rezki (Sony) freed = 0; 16414da56b99SChris Wilson blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 164212e376a6SUladzislau Rezki (Sony) 16434da56b99SChris Wilson if (freed > 0) { 16444da56b99SChris Wilson purged = 0; 16454da56b99SChris Wilson goto retry; 16464da56b99SChris Wilson } 16474da56b99SChris Wilson 164803497d76SFlorian Fainelli if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 1649756a025fSJoe Perches pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 1650756a025fSJoe Perches size); 165168ad4a33SUladzislau Rezki (Sony) 165268ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 1653db64fe02SNick Piggin return ERR_PTR(-EBUSY); 1654db64fe02SNick Piggin } 1655db64fe02SNick Piggin 16564da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb) 16574da56b99SChris Wilson { 16584da56b99SChris Wilson return blocking_notifier_chain_register(&vmap_notify_list, nb); 16594da56b99SChris Wilson } 16604da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 16614da56b99SChris Wilson 16624da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb) 16634da56b99SChris Wilson { 16644da56b99SChris Wilson return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 16654da56b99SChris Wilson } 16664da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 16674da56b99SChris Wilson 1668db64fe02SNick Piggin /* 1669db64fe02SNick Piggin * lazy_max_pages is the maximum amount of virtual address space we gather up 1670db64fe02SNick Piggin * before attempting to purge with a TLB flush. 1671db64fe02SNick Piggin * 1672db64fe02SNick Piggin * There is a tradeoff here: a larger number will cover more kernel page tables 1673db64fe02SNick Piggin * and take slightly longer to purge, but it will linearly reduce the number of 1674db64fe02SNick Piggin * global TLB flushes that must be performed. 
It would seem natural to scale 1675db64fe02SNick Piggin * this number up linearly with the number of CPUs (because vmapping activity 1676db64fe02SNick Piggin * could also scale linearly with the number of CPUs), however it is likely 1677db64fe02SNick Piggin * that in practice, workloads might be constrained in other ways that mean 1678db64fe02SNick Piggin * vmap activity will not scale linearly with CPUs. Also, I want to be 1679db64fe02SNick Piggin * conservative and not introduce a big latency on huge systems, so go with 1680db64fe02SNick Piggin * a less aggressive log scale. It will still be an improvement over the old 1681db64fe02SNick Piggin * code, and it will be simple to change the scale factor if we find that it 1682db64fe02SNick Piggin * becomes a problem on bigger systems. 1683db64fe02SNick Piggin */ 1684db64fe02SNick Piggin static unsigned long lazy_max_pages(void) 1685db64fe02SNick Piggin { 1686db64fe02SNick Piggin unsigned int log; 1687db64fe02SNick Piggin 1688db64fe02SNick Piggin log = fls(num_online_cpus()); 1689db64fe02SNick Piggin 1690db64fe02SNick Piggin return log * (32UL * 1024 * 1024 / PAGE_SIZE); 1691db64fe02SNick Piggin } 1692db64fe02SNick Piggin 16934d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); 1694db64fe02SNick Piggin 16950574ecd1SChristoph Hellwig /* 1696f0953a1bSIngo Molnar * Serialize vmap purging. There is no actual critical section protected 1697153090f2SBaoquan He * by this lock, but we want to avoid concurrent calls for performance 16980574ecd1SChristoph Hellwig * reasons and to make the pcpu_get_vm_areas more deterministic. 16990574ecd1SChristoph Hellwig */ 1700f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock); 17010574ecd1SChristoph Hellwig 170202b709dfSNick Piggin /* for per-CPU blocks */ 170302b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void); 170402b709dfSNick Piggin 17053ee48b6aSCliff Wickman /* 1706db64fe02SNick Piggin * Purges all lazily-freed vmap areas. 
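 *
 * Callers normally seed the range with (ULONG_MAX, 0), as
 * purge_vmap_area_lazy() and drain_vmap_area_work() below do, so that
 * the min()/max() logic derives the TLB flush range entirely from the
 * detached purge list:
 *
 *	__purge_vmap_area_lazy(ULONG_MAX, 0);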
1707db64fe02SNick Piggin */ 17080574ecd1SChristoph Hellwig static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) 1709db64fe02SNick Piggin { 17104d36e6f8SUladzislau Rezki (Sony) unsigned long resched_threshold; 1711baa468a6SBaoquan He struct list_head local_purge_list; 171296e2db45SUladzislau Rezki (Sony) struct vmap_area *va, *n_va; 1713db64fe02SNick Piggin 17140574ecd1SChristoph Hellwig lockdep_assert_held(&vmap_purge_lock); 171502b709dfSNick Piggin 171696e2db45SUladzislau Rezki (Sony) spin_lock(&purge_vmap_area_lock); 171796e2db45SUladzislau Rezki (Sony) purge_vmap_area_root = RB_ROOT; 1718baa468a6SBaoquan He list_replace_init(&purge_vmap_area_list, &local_purge_list); 171996e2db45SUladzislau Rezki (Sony) spin_unlock(&purge_vmap_area_lock); 172096e2db45SUladzislau Rezki (Sony) 1721baa468a6SBaoquan He if (unlikely(list_empty(&local_purge_list))) 172268571be9SUladzislau Rezki (Sony) return false; 172368571be9SUladzislau Rezki (Sony) 172496e2db45SUladzislau Rezki (Sony) start = min(start, 1725baa468a6SBaoquan He list_first_entry(&local_purge_list, 172696e2db45SUladzislau Rezki (Sony) struct vmap_area, list)->va_start); 172796e2db45SUladzislau Rezki (Sony) 172896e2db45SUladzislau Rezki (Sony) end = max(end, 1729baa468a6SBaoquan He list_last_entry(&local_purge_list, 173096e2db45SUladzislau Rezki (Sony) struct vmap_area, list)->va_end); 1731db64fe02SNick Piggin 17320574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 17334d36e6f8SUladzislau Rezki (Sony) resched_threshold = lazy_max_pages() << 1; 1734db64fe02SNick Piggin 1735e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 1736baa468a6SBaoquan He list_for_each_entry_safe(va, n_va, &local_purge_list, list) { 17374d36e6f8SUladzislau Rezki (Sony) unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; 17383c5c3cfbSDaniel Axtens unsigned long orig_start = va->va_start; 17393c5c3cfbSDaniel Axtens unsigned long orig_end = va->va_end; 1740763b218dSJoel Fernandes 1741dd3b8353SUladzislau Rezki (Sony) /* 1742dd3b8353SUladzislau Rezki (Sony) * Finally insert or merge lazily-freed area. It is 1743dd3b8353SUladzislau Rezki (Sony) * detached and there is no need to "unlink" it from 1744dd3b8353SUladzislau Rezki (Sony) * anything. 1745dd3b8353SUladzislau Rezki (Sony) */ 174696e2db45SUladzislau Rezki (Sony) va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root, 17473c5c3cfbSDaniel Axtens &free_vmap_area_list); 17483c5c3cfbSDaniel Axtens 17499c801f61SUladzislau Rezki (Sony) if (!va) 17509c801f61SUladzislau Rezki (Sony) continue; 17519c801f61SUladzislau Rezki (Sony) 17523c5c3cfbSDaniel Axtens if (is_vmalloc_or_module_addr((void *)orig_start)) 17533c5c3cfbSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 17543c5c3cfbSDaniel Axtens va->va_start, va->va_end); 1755dd3b8353SUladzislau Rezki (Sony) 17564d36e6f8SUladzislau Rezki (Sony) atomic_long_sub(nr, &vmap_lazy_nr); 175768571be9SUladzislau Rezki (Sony) 17584d36e6f8SUladzislau Rezki (Sony) if (atomic_long_read(&vmap_lazy_nr) < resched_threshold) 1759e36176beSUladzislau Rezki (Sony) cond_resched_lock(&free_vmap_area_lock); 1760763b218dSJoel Fernandes } 1761e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 17620574ecd1SChristoph Hellwig return true; 1763db64fe02SNick Piggin } 1764db64fe02SNick Piggin 1765db64fe02SNick Piggin /* 1766db64fe02SNick Piggin * Kick off a purge of the outstanding lazy areas. 
1767db64fe02SNick Piggin */ 1768db64fe02SNick Piggin static void purge_vmap_area_lazy(void) 1769db64fe02SNick Piggin { 1770f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 17710574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 17720574ecd1SChristoph Hellwig __purge_vmap_area_lazy(ULONG_MAX, 0); 1773f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 1774db64fe02SNick Piggin } 1775db64fe02SNick Piggin 1776690467c8SUladzislau Rezki (Sony) static void drain_vmap_area_work(struct work_struct *work) 1777690467c8SUladzislau Rezki (Sony) { 1778690467c8SUladzislau Rezki (Sony) unsigned long nr_lazy; 1779690467c8SUladzislau Rezki (Sony) 1780690467c8SUladzislau Rezki (Sony) do { 1781690467c8SUladzislau Rezki (Sony) mutex_lock(&vmap_purge_lock); 1782690467c8SUladzislau Rezki (Sony) __purge_vmap_area_lazy(ULONG_MAX, 0); 1783690467c8SUladzislau Rezki (Sony) mutex_unlock(&vmap_purge_lock); 1784690467c8SUladzislau Rezki (Sony) 1785690467c8SUladzislau Rezki (Sony) /* Recheck if further work is required. */ 1786690467c8SUladzislau Rezki (Sony) nr_lazy = atomic_long_read(&vmap_lazy_nr); 1787690467c8SUladzislau Rezki (Sony) } while (nr_lazy > lazy_max_pages()); 1788690467c8SUladzislau Rezki (Sony) } 1789690467c8SUladzislau Rezki (Sony) 1790db64fe02SNick Piggin /* 179164141da5SJeremy Fitzhardinge * Free a vmap area, caller ensuring that the area has been unmapped 179264141da5SJeremy Fitzhardinge * and flush_cache_vunmap had been called for the correct range 179364141da5SJeremy Fitzhardinge * previously. 1794db64fe02SNick Piggin */ 179564141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va) 1796db64fe02SNick Piggin { 17974d36e6f8SUladzislau Rezki (Sony) unsigned long nr_lazy; 179880c4bd7aSChris Wilson 1799dd3b8353SUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 1800dd3b8353SUladzislau Rezki (Sony) unlink_va(va, &vmap_area_root); 1801dd3b8353SUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 1802dd3b8353SUladzislau Rezki (Sony) 18034d36e6f8SUladzislau Rezki (Sony) nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> 18044d36e6f8SUladzislau Rezki (Sony) PAGE_SHIFT, &vmap_lazy_nr); 180580c4bd7aSChris Wilson 180696e2db45SUladzislau Rezki (Sony) /* 180796e2db45SUladzislau Rezki (Sony) * Merge or place it to the purge tree/list. 
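 *
 * Rough illustrative numbers, assuming 4K pages (not a guarantee of
 * behaviour): with 8 online CPUs, fls(8) == 4, so lazy_max_pages()
 * returns
 *
 *	4 * (32UL * 1024 * 1024 / 4096) = 32768 pages,
 *
 * i.e. roughly 128MB worth of lazily freed VA may accumulate before
 * the drain work below is scheduled.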
180896e2db45SUladzislau Rezki (Sony) */ 180996e2db45SUladzislau Rezki (Sony) spin_lock(&purge_vmap_area_lock); 181096e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area(va, 181196e2db45SUladzislau Rezki (Sony) &purge_vmap_area_root, &purge_vmap_area_list); 181296e2db45SUladzislau Rezki (Sony) spin_unlock(&purge_vmap_area_lock); 181380c4bd7aSChris Wilson 181496e2db45SUladzislau Rezki (Sony) /* After this point, we may free va at any time */ 181580c4bd7aSChris Wilson if (unlikely(nr_lazy > lazy_max_pages())) 1816690467c8SUladzislau Rezki (Sony) schedule_work(&drain_vmap_work); 1817db64fe02SNick Piggin } 1818db64fe02SNick Piggin 1819b29acbdcSNick Piggin /* 1820b29acbdcSNick Piggin * Free and unmap a vmap area 1821b29acbdcSNick Piggin */ 1822b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va) 1823b29acbdcSNick Piggin { 1824b29acbdcSNick Piggin flush_cache_vunmap(va->va_start, va->va_end); 18254ad0ae8cSNicholas Piggin vunmap_range_noflush(va->va_start, va->va_end); 18268e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 182782a2e924SChintan Pandya flush_tlb_kernel_range(va->va_start, va->va_end); 182882a2e924SChintan Pandya 1829c8eef01eSChristoph Hellwig free_vmap_area_noflush(va); 1830b29acbdcSNick Piggin } 1831b29acbdcSNick Piggin 1832993d0b28SMatthew Wilcox (Oracle) struct vmap_area *find_vmap_area(unsigned long addr) 1833db64fe02SNick Piggin { 1834db64fe02SNick Piggin struct vmap_area *va; 1835db64fe02SNick Piggin 1836db64fe02SNick Piggin spin_lock(&vmap_area_lock); 1837*899c6efeSUladzislau Rezki (Sony) va = __find_vmap_area(addr, &vmap_area_root); 1838db64fe02SNick Piggin spin_unlock(&vmap_area_lock); 1839db64fe02SNick Piggin 1840db64fe02SNick Piggin return va; 1841db64fe02SNick Piggin } 1842db64fe02SNick Piggin 1843db64fe02SNick Piggin /*** Per cpu kva allocator ***/ 1844db64fe02SNick Piggin 1845db64fe02SNick Piggin /* 1846db64fe02SNick Piggin * vmap space is limited especially on 32 bit architectures. Ensure there is 1847db64fe02SNick Piggin * room for at least 16 percpu vmap blocks per CPU. 1848db64fe02SNick Piggin */ 1849db64fe02SNick Piggin /* 1850db64fe02SNick Piggin * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 1851db64fe02SNick Piggin * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 1852db64fe02SNick Piggin * instead (we just need a rough idea) 1853db64fe02SNick Piggin */ 1854db64fe02SNick Piggin #if BITS_PER_LONG == 32 1855db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024) 1856db64fe02SNick Piggin #else 1857db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024*1024) 1858db64fe02SNick Piggin #endif 1859db64fe02SNick Piggin 1860db64fe02SNick Piggin #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 1861db64fe02SNick Piggin #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 1862db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 1863db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 1864db64fe02SNick Piggin #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 1865db64fe02SNick Piggin #define VMAP_MAX(x, y) ((x) > (y) ? 
(x) : (y)) /* can't use max() */ 1866f982f915SClemens Ladisch #define VMAP_BBMAP_BITS \ 1867f982f915SClemens Ladisch VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 1868db64fe02SNick Piggin VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 1869f982f915SClemens Ladisch VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 1870db64fe02SNick Piggin 1871db64fe02SNick Piggin #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 1872db64fe02SNick Piggin 1873db64fe02SNick Piggin struct vmap_block_queue { 1874db64fe02SNick Piggin spinlock_t lock; 1875db64fe02SNick Piggin struct list_head free; 1876db64fe02SNick Piggin }; 1877db64fe02SNick Piggin 1878db64fe02SNick Piggin struct vmap_block { 1879db64fe02SNick Piggin spinlock_t lock; 1880db64fe02SNick Piggin struct vmap_area *va; 1881db64fe02SNick Piggin unsigned long free, dirty; 18827d61bfe8SRoman Pen unsigned long dirty_min, dirty_max; /*< dirty range */ 1883db64fe02SNick Piggin struct list_head free_list; 1884db64fe02SNick Piggin struct rcu_head rcu_head; 188502b709dfSNick Piggin struct list_head purge; 1886db64fe02SNick Piggin }; 1887db64fe02SNick Piggin 1888db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 1889db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 1890db64fe02SNick Piggin 1891db64fe02SNick Piggin /* 18920f14599cSMatthew Wilcox (Oracle) * XArray of vmap blocks, indexed by address, to quickly find a vmap block 1893db64fe02SNick Piggin * in the free path. Could get rid of this if we change the API to return a 1894db64fe02SNick Piggin * "cookie" from alloc, to be passed to free. But no big deal yet. 1895db64fe02SNick Piggin */ 18960f14599cSMatthew Wilcox (Oracle) static DEFINE_XARRAY(vmap_blocks); 1897db64fe02SNick Piggin 1898db64fe02SNick Piggin /* 1899db64fe02SNick Piggin * We should probably have a fallback mechanism to allocate virtual memory 1900db64fe02SNick Piggin * out of partially filled vmap blocks. However vmap block sizing should be 1901db64fe02SNick Piggin * fairly reasonable according to the vmalloc size, so it shouldn't be a 1902db64fe02SNick Piggin * big problem. 1903db64fe02SNick Piggin */ 1904db64fe02SNick Piggin 1905db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr) 1906db64fe02SNick Piggin { 1907db64fe02SNick Piggin addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 1908db64fe02SNick Piggin addr /= VMAP_BLOCK_SIZE; 1909db64fe02SNick Piggin return addr; 1910db64fe02SNick Piggin } 1911db64fe02SNick Piggin 1912cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 1913cf725ce2SRoman Pen { 1914cf725ce2SRoman Pen unsigned long addr; 1915cf725ce2SRoman Pen 1916cf725ce2SRoman Pen addr = va_start + (pages_off << PAGE_SHIFT); 1917cf725ce2SRoman Pen BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 1918cf725ce2SRoman Pen return (void *)addr; 1919cf725ce2SRoman Pen } 1920cf725ce2SRoman Pen 1921cf725ce2SRoman Pen /** 1922cf725ce2SRoman Pen * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 1923cf725ce2SRoman Pen * block. 
Of course pages number can't exceed VMAP_BBMAP_BITS 1924cf725ce2SRoman Pen * @order: how many 2^order pages should be occupied in newly allocated block 1925cf725ce2SRoman Pen * @gfp_mask: flags for the page level allocator 1926cf725ce2SRoman Pen * 1927a862f68aSMike Rapoport * Return: virtual address in a newly allocated block or ERR_PTR(-errno) 1928cf725ce2SRoman Pen */ 1929cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 1930db64fe02SNick Piggin { 1931db64fe02SNick Piggin struct vmap_block_queue *vbq; 1932db64fe02SNick Piggin struct vmap_block *vb; 1933db64fe02SNick Piggin struct vmap_area *va; 1934db64fe02SNick Piggin unsigned long vb_idx; 1935db64fe02SNick Piggin int node, err; 1936cf725ce2SRoman Pen void *vaddr; 1937db64fe02SNick Piggin 1938db64fe02SNick Piggin node = numa_node_id(); 1939db64fe02SNick Piggin 1940db64fe02SNick Piggin vb = kmalloc_node(sizeof(struct vmap_block), 1941db64fe02SNick Piggin gfp_mask & GFP_RECLAIM_MASK, node); 1942db64fe02SNick Piggin if (unlikely(!vb)) 1943db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 1944db64fe02SNick Piggin 1945db64fe02SNick Piggin va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 1946db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, 1947db64fe02SNick Piggin node, gfp_mask); 1948ddf9c6d4STobias Klauser if (IS_ERR(va)) { 1949db64fe02SNick Piggin kfree(vb); 1950e7d86340SJulia Lawall return ERR_CAST(va); 1951db64fe02SNick Piggin } 1952db64fe02SNick Piggin 1953cf725ce2SRoman Pen vaddr = vmap_block_vaddr(va->va_start, 0); 1954db64fe02SNick Piggin spin_lock_init(&vb->lock); 1955db64fe02SNick Piggin vb->va = va; 1956cf725ce2SRoman Pen /* At least something should be left free */ 1957cf725ce2SRoman Pen BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 1958cf725ce2SRoman Pen vb->free = VMAP_BBMAP_BITS - (1UL << order); 1959db64fe02SNick Piggin vb->dirty = 0; 19607d61bfe8SRoman Pen vb->dirty_min = VMAP_BBMAP_BITS; 19617d61bfe8SRoman Pen vb->dirty_max = 0; 1962db64fe02SNick Piggin INIT_LIST_HEAD(&vb->free_list); 1963db64fe02SNick Piggin 1964db64fe02SNick Piggin vb_idx = addr_to_vb_idx(va->va_start); 19650f14599cSMatthew Wilcox (Oracle) err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask); 19660f14599cSMatthew Wilcox (Oracle) if (err) { 19670f14599cSMatthew Wilcox (Oracle) kfree(vb); 19680f14599cSMatthew Wilcox (Oracle) free_vmap_area(va); 19690f14599cSMatthew Wilcox (Oracle) return ERR_PTR(err); 19700f14599cSMatthew Wilcox (Oracle) } 1971db64fe02SNick Piggin 19723f804920SSebastian Andrzej Siewior vbq = raw_cpu_ptr(&vmap_block_queue); 1973db64fe02SNick Piggin spin_lock(&vbq->lock); 197468ac546fSRoman Pen list_add_tail_rcu(&vb->free_list, &vbq->free); 1975db64fe02SNick Piggin spin_unlock(&vbq->lock); 1976db64fe02SNick Piggin 1977cf725ce2SRoman Pen return vaddr; 1978db64fe02SNick Piggin } 1979db64fe02SNick Piggin 1980db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb) 1981db64fe02SNick Piggin { 1982db64fe02SNick Piggin struct vmap_block *tmp; 1983db64fe02SNick Piggin 19840f14599cSMatthew Wilcox (Oracle) tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start)); 1985db64fe02SNick Piggin BUG_ON(tmp != vb); 1986db64fe02SNick Piggin 198764141da5SJeremy Fitzhardinge free_vmap_area_noflush(vb->va); 198822a3c7d1SLai Jiangshan kfree_rcu(vb, rcu_head); 1989db64fe02SNick Piggin } 1990db64fe02SNick Piggin 199102b709dfSNick Piggin static void purge_fragmented_blocks(int cpu) 199202b709dfSNick Piggin { 199302b709dfSNick Piggin LIST_HEAD(purge); 199402b709dfSNick Piggin struct vmap_block *vb; 199502b709dfSNick 
Piggin struct vmap_block *n_vb; 199602b709dfSNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 199702b709dfSNick Piggin 199802b709dfSNick Piggin rcu_read_lock(); 199902b709dfSNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 200002b709dfSNick Piggin 200102b709dfSNick Piggin if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) 200202b709dfSNick Piggin continue; 200302b709dfSNick Piggin 200402b709dfSNick Piggin spin_lock(&vb->lock); 200502b709dfSNick Piggin if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { 200602b709dfSNick Piggin vb->free = 0; /* prevent further allocs after releasing lock */ 200702b709dfSNick Piggin vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ 20087d61bfe8SRoman Pen vb->dirty_min = 0; 20097d61bfe8SRoman Pen vb->dirty_max = VMAP_BBMAP_BITS; 201002b709dfSNick Piggin spin_lock(&vbq->lock); 201102b709dfSNick Piggin list_del_rcu(&vb->free_list); 201202b709dfSNick Piggin spin_unlock(&vbq->lock); 201302b709dfSNick Piggin spin_unlock(&vb->lock); 201402b709dfSNick Piggin list_add_tail(&vb->purge, &purge); 201502b709dfSNick Piggin } else 201602b709dfSNick Piggin spin_unlock(&vb->lock); 201702b709dfSNick Piggin } 201802b709dfSNick Piggin rcu_read_unlock(); 201902b709dfSNick Piggin 202002b709dfSNick Piggin list_for_each_entry_safe(vb, n_vb, &purge, purge) { 202102b709dfSNick Piggin list_del(&vb->purge); 202202b709dfSNick Piggin free_vmap_block(vb); 202302b709dfSNick Piggin } 202402b709dfSNick Piggin } 202502b709dfSNick Piggin 202602b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void) 202702b709dfSNick Piggin { 202802b709dfSNick Piggin int cpu; 202902b709dfSNick Piggin 203002b709dfSNick Piggin for_each_possible_cpu(cpu) 203102b709dfSNick Piggin purge_fragmented_blocks(cpu); 203202b709dfSNick Piggin } 203302b709dfSNick Piggin 2034db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 2035db64fe02SNick Piggin { 2036db64fe02SNick Piggin struct vmap_block_queue *vbq; 2037db64fe02SNick Piggin struct vmap_block *vb; 2038cf725ce2SRoman Pen void *vaddr = NULL; 2039db64fe02SNick Piggin unsigned int order; 2040db64fe02SNick Piggin 2041891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 2042db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2043aa91c4d8SJan Kara if (WARN_ON(size == 0)) { 2044aa91c4d8SJan Kara /* 2045aa91c4d8SJan Kara * Allocating 0 bytes isn't what caller wants since 2046aa91c4d8SJan Kara * get_order(0) returns funny result. Just warn and terminate 2047aa91c4d8SJan Kara * early. 
2048aa91c4d8SJan Kara */ 2049aa91c4d8SJan Kara return NULL; 2050aa91c4d8SJan Kara } 2051db64fe02SNick Piggin order = get_order(size); 2052db64fe02SNick Piggin 2053db64fe02SNick Piggin rcu_read_lock(); 20543f804920SSebastian Andrzej Siewior vbq = raw_cpu_ptr(&vmap_block_queue); 2055db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2056cf725ce2SRoman Pen unsigned long pages_off; 2057db64fe02SNick Piggin 2058db64fe02SNick Piggin spin_lock(&vb->lock); 2059cf725ce2SRoman Pen if (vb->free < (1UL << order)) { 2060cf725ce2SRoman Pen spin_unlock(&vb->lock); 2061cf725ce2SRoman Pen continue; 2062cf725ce2SRoman Pen } 206302b709dfSNick Piggin 2064cf725ce2SRoman Pen pages_off = VMAP_BBMAP_BITS - vb->free; 2065cf725ce2SRoman Pen vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 2066db64fe02SNick Piggin vb->free -= 1UL << order; 2067db64fe02SNick Piggin if (vb->free == 0) { 2068db64fe02SNick Piggin spin_lock(&vbq->lock); 2069de560423SNick Piggin list_del_rcu(&vb->free_list); 2070db64fe02SNick Piggin spin_unlock(&vbq->lock); 2071db64fe02SNick Piggin } 2072cf725ce2SRoman Pen 2073db64fe02SNick Piggin spin_unlock(&vb->lock); 2074db64fe02SNick Piggin break; 2075db64fe02SNick Piggin } 207602b709dfSNick Piggin 2077db64fe02SNick Piggin rcu_read_unlock(); 2078db64fe02SNick Piggin 2079cf725ce2SRoman Pen /* Allocate new block if nothing was found */ 2080cf725ce2SRoman Pen if (!vaddr) 2081cf725ce2SRoman Pen vaddr = new_vmap_block(order, gfp_mask); 2082db64fe02SNick Piggin 2083cf725ce2SRoman Pen return vaddr; 2084db64fe02SNick Piggin } 2085db64fe02SNick Piggin 208678a0e8c4SChristoph Hellwig static void vb_free(unsigned long addr, unsigned long size) 2087db64fe02SNick Piggin { 2088db64fe02SNick Piggin unsigned long offset; 2089db64fe02SNick Piggin unsigned int order; 2090db64fe02SNick Piggin struct vmap_block *vb; 2091db64fe02SNick Piggin 2092891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 2093db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2094b29acbdcSNick Piggin 209578a0e8c4SChristoph Hellwig flush_cache_vunmap(addr, addr + size); 2096b29acbdcSNick Piggin 2097db64fe02SNick Piggin order = get_order(size); 209878a0e8c4SChristoph Hellwig offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 20990f14599cSMatthew Wilcox (Oracle) vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr)); 2100db64fe02SNick Piggin 21014ad0ae8cSNicholas Piggin vunmap_range_noflush(addr, addr + size); 210264141da5SJeremy Fitzhardinge 21038e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 210478a0e8c4SChristoph Hellwig flush_tlb_kernel_range(addr, addr + size); 210582a2e924SChintan Pandya 2106db64fe02SNick Piggin spin_lock(&vb->lock); 21077d61bfe8SRoman Pen 21087d61bfe8SRoman Pen /* Expand dirty range */ 21097d61bfe8SRoman Pen vb->dirty_min = min(vb->dirty_min, offset); 21107d61bfe8SRoman Pen vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 2111d086817dSMinChan Kim 2112db64fe02SNick Piggin vb->dirty += 1UL << order; 2113db64fe02SNick Piggin if (vb->dirty == VMAP_BBMAP_BITS) { 2114de560423SNick Piggin BUG_ON(vb->free); 2115db64fe02SNick Piggin spin_unlock(&vb->lock); 2116db64fe02SNick Piggin free_vmap_block(vb); 2117db64fe02SNick Piggin } else 2118db64fe02SNick Piggin spin_unlock(&vb->lock); 2119db64fe02SNick Piggin } 2120db64fe02SNick Piggin 2121868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 2122db64fe02SNick Piggin { 2123db64fe02SNick Piggin int cpu; 2124db64fe02SNick Piggin 21259b463334SJeremy Fitzhardinge if 
(unlikely(!vmap_initialized)) 21269b463334SJeremy Fitzhardinge return; 21279b463334SJeremy Fitzhardinge 21285803ed29SChristoph Hellwig might_sleep(); 21295803ed29SChristoph Hellwig 2130db64fe02SNick Piggin for_each_possible_cpu(cpu) { 2131db64fe02SNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2132db64fe02SNick Piggin struct vmap_block *vb; 2133db64fe02SNick Piggin 2134db64fe02SNick Piggin rcu_read_lock(); 2135db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2136db64fe02SNick Piggin spin_lock(&vb->lock); 2137ad216c03SVijayanand Jitta if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { 21387d61bfe8SRoman Pen unsigned long va_start = vb->va->va_start; 2139db64fe02SNick Piggin unsigned long s, e; 2140b136be5eSJoonsoo Kim 21417d61bfe8SRoman Pen s = va_start + (vb->dirty_min << PAGE_SHIFT); 21427d61bfe8SRoman Pen e = va_start + (vb->dirty_max << PAGE_SHIFT); 2143db64fe02SNick Piggin 21447d61bfe8SRoman Pen start = min(s, start); 21457d61bfe8SRoman Pen end = max(e, end); 21467d61bfe8SRoman Pen 2147db64fe02SNick Piggin flush = 1; 2148db64fe02SNick Piggin } 2149db64fe02SNick Piggin spin_unlock(&vb->lock); 2150db64fe02SNick Piggin } 2151db64fe02SNick Piggin rcu_read_unlock(); 2152db64fe02SNick Piggin } 2153db64fe02SNick Piggin 2154f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 21550574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 21560574ecd1SChristoph Hellwig if (!__purge_vmap_area_lazy(start, end) && flush) 21570574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 2158f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 2159db64fe02SNick Piggin } 2160868b104dSRick Edgecombe 2161868b104dSRick Edgecombe /** 2162868b104dSRick Edgecombe * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 2163868b104dSRick Edgecombe * 2164868b104dSRick Edgecombe * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 2165868b104dSRick Edgecombe * to amortize TLB flushing overheads. What this means is that any page you 2166868b104dSRick Edgecombe * have now, may, in a former life, have been mapped into kernel virtual 2167868b104dSRick Edgecombe * address by the vmap layer and so there might be some CPUs with TLB entries 2168868b104dSRick Edgecombe * still referencing that page (additional to the regular 1:1 kernel mapping). 2169868b104dSRick Edgecombe * 2170868b104dSRick Edgecombe * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 2171868b104dSRick Edgecombe * be sure that none of the pages we have control over will have any aliases 2172868b104dSRick Edgecombe * from the vmap layer. 
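 *
 * Illustrative sketch only (the trigger is hypothetical): a caller that is
 * about to hand pages back to a hypervisor or a device, and therefore wants
 * no stale kernel aliases left behind, can simply do
 *
 *	vm_unmap_aliases();
 *
 * before releasing the pages.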
2173868b104dSRick Edgecombe */ 2174868b104dSRick Edgecombe void vm_unmap_aliases(void) 2175868b104dSRick Edgecombe { 2176868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 2177868b104dSRick Edgecombe int flush = 0; 2178868b104dSRick Edgecombe 2179868b104dSRick Edgecombe _vm_unmap_aliases(start, end, flush); 2180868b104dSRick Edgecombe } 2181db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases); 2182db64fe02SNick Piggin 2183db64fe02SNick Piggin /** 2184db64fe02SNick Piggin * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 2185db64fe02SNick Piggin * @mem: the pointer returned by vm_map_ram 2186db64fe02SNick Piggin * @count: the count passed to that vm_map_ram call (cannot unmap partial) 2187db64fe02SNick Piggin */ 2188db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count) 2189db64fe02SNick Piggin { 219065ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 21914aff1dc4SAndrey Konovalov unsigned long addr = (unsigned long)kasan_reset_tag(mem); 21929c3acf60SChristoph Hellwig struct vmap_area *va; 2193db64fe02SNick Piggin 21945803ed29SChristoph Hellwig might_sleep(); 2195db64fe02SNick Piggin BUG_ON(!addr); 2196db64fe02SNick Piggin BUG_ON(addr < VMALLOC_START); 2197db64fe02SNick Piggin BUG_ON(addr > VMALLOC_END); 2198a1c0b1a0SShawn Lin BUG_ON(!PAGE_ALIGNED(addr)); 2199db64fe02SNick Piggin 2200d98c9e83SAndrey Ryabinin kasan_poison_vmalloc(mem, size); 2201d98c9e83SAndrey Ryabinin 22029c3acf60SChristoph Hellwig if (likely(count <= VMAP_MAX_ALLOC)) { 220305e3ff95SChintan Pandya debug_check_no_locks_freed(mem, size); 220478a0e8c4SChristoph Hellwig vb_free(addr, size); 22059c3acf60SChristoph Hellwig return; 22069c3acf60SChristoph Hellwig } 22079c3acf60SChristoph Hellwig 22089c3acf60SChristoph Hellwig va = find_vmap_area(addr); 22099c3acf60SChristoph Hellwig BUG_ON(!va); 221005e3ff95SChintan Pandya debug_check_no_locks_freed((void *)va->va_start, 221105e3ff95SChintan Pandya (va->va_end - va->va_start)); 22129c3acf60SChristoph Hellwig free_unmap_vmap_area(va); 2213db64fe02SNick Piggin } 2214db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram); 2215db64fe02SNick Piggin 2216db64fe02SNick Piggin /** 2217db64fe02SNick Piggin * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 2218db64fe02SNick Piggin * @pages: an array of pointers to the pages to be mapped 2219db64fe02SNick Piggin * @count: number of pages 2220db64fe02SNick Piggin * @node: prefer to allocate data structures on this node 2221e99c97adSRandy Dunlap * 222236437638SGioh Kim * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 222336437638SGioh Kim * faster than vmap so it's good. But if you mix long-life and short-life 222436437638SGioh Kim * objects with vm_map_ram(), it could consume lots of address space through 222536437638SGioh Kim * fragmentation (especially on a 32bit machine). You could see failures in 222636437638SGioh Kim * the end. Please use this function for short-lived objects. 
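 *
 * A minimal sketch of the intended short-lived use (illustrative only; it
 * assumes pages[] has already been filled with allocated pages and leaves
 * out error handling):
 *
 *	struct page *pages[8];
 *	void *va;
 *
 *	va = vm_map_ram(pages, 8, NUMA_NO_NODE);
 *	if (va) {
 *		memset(va, 0, 8 * PAGE_SIZE);
 *		vm_unmap_ram(va, 8);
 *	}
 *
 * The count given to vm_unmap_ram() must be the same one that was passed
 * to vm_map_ram(); partial unmaps are not supported.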
222736437638SGioh Kim * 2228e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure 2229db64fe02SNick Piggin */ 2230d4efd79aSChristoph Hellwig void *vm_map_ram(struct page **pages, unsigned int count, int node) 2231db64fe02SNick Piggin { 223265ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 2233db64fe02SNick Piggin unsigned long addr; 2234db64fe02SNick Piggin void *mem; 2235db64fe02SNick Piggin 2236db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) { 2237db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL); 2238db64fe02SNick Piggin if (IS_ERR(mem)) 2239db64fe02SNick Piggin return NULL; 2240db64fe02SNick Piggin addr = (unsigned long)mem; 2241db64fe02SNick Piggin } else { 2242db64fe02SNick Piggin struct vmap_area *va; 2243db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE, 2244db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); 2245db64fe02SNick Piggin if (IS_ERR(va)) 2246db64fe02SNick Piggin return NULL; 2247db64fe02SNick Piggin 2248db64fe02SNick Piggin addr = va->va_start; 2249db64fe02SNick Piggin mem = (void *)addr; 2250db64fe02SNick Piggin } 2251d98c9e83SAndrey Ryabinin 2252b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, 2253b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) { 2254db64fe02SNick Piggin vm_unmap_ram(mem, count); 2255db64fe02SNick Piggin return NULL; 2256db64fe02SNick Piggin } 2257b67177ecSNicholas Piggin 225823689e91SAndrey Konovalov /* 225923689e91SAndrey Konovalov * Mark the pages as accessible, now that they are mapped. 226023689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for 226123689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 226223689e91SAndrey Konovalov */ 2263f6e39794SAndrey Konovalov mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); 226419f1c3acSAndrey Konovalov 2265db64fe02SNick Piggin return mem; 2266db64fe02SNick Piggin } 2267db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram); 2268db64fe02SNick Piggin 22694341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata; 227092eac168SMike Rapoport 2271121e6f32SNicholas Piggin static inline unsigned int vm_area_page_order(struct vm_struct *vm) 2272121e6f32SNicholas Piggin { 2273121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2274121e6f32SNicholas Piggin return vm->page_order; 2275121e6f32SNicholas Piggin #else 2276121e6f32SNicholas Piggin return 0; 2277121e6f32SNicholas Piggin #endif 2278121e6f32SNicholas Piggin } 2279121e6f32SNicholas Piggin 2280121e6f32SNicholas Piggin static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) 2281121e6f32SNicholas Piggin { 2282121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2283121e6f32SNicholas Piggin vm->page_order = order; 2284121e6f32SNicholas Piggin #else 2285121e6f32SNicholas Piggin BUG_ON(order != 0); 2286121e6f32SNicholas Piggin #endif 2287121e6f32SNicholas Piggin } 2288121e6f32SNicholas Piggin 2289f0aa6617STejun Heo /** 2290be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot 2291be9b7335SNicolas Pitre * @vm: vm_struct to add 2292be9b7335SNicolas Pitre * 2293be9b7335SNicolas Pitre * This function is used to add fixed kernel vm area to vmlist before 2294be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 2295be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero. 
2296be9b7335SNicolas Pitre * 2297be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 2298be9b7335SNicolas Pitre */ 2299be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm) 2300be9b7335SNicolas Pitre { 2301be9b7335SNicolas Pitre struct vm_struct *tmp, **p; 2302be9b7335SNicolas Pitre 2303be9b7335SNicolas Pitre BUG_ON(vmap_initialized); 2304be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 2305be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) { 2306be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size); 2307be9b7335SNicolas Pitre break; 2308be9b7335SNicolas Pitre } else 2309be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr); 2310be9b7335SNicolas Pitre } 2311be9b7335SNicolas Pitre vm->next = *p; 2312be9b7335SNicolas Pitre *p = vm; 2313be9b7335SNicolas Pitre } 2314be9b7335SNicolas Pitre 2315be9b7335SNicolas Pitre /** 2316f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot 2317f0aa6617STejun Heo * @vm: vm_struct to register 2318c0c0a293STejun Heo * @align: requested alignment 2319f0aa6617STejun Heo * 2320f0aa6617STejun Heo * This function is used to register kernel vm area before 2321f0aa6617STejun Heo * vmalloc_init() is called. @vm->size and @vm->flags should contain 2322f0aa6617STejun Heo * proper values on entry and other fields should be zero. On return, 2323f0aa6617STejun Heo * vm->addr contains the allocated address. 2324f0aa6617STejun Heo * 2325f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 2326f0aa6617STejun Heo */ 2327c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align) 2328f0aa6617STejun Heo { 23290eb68437SKefeng Wang unsigned long addr = ALIGN(VMALLOC_START, align); 23300eb68437SKefeng Wang struct vm_struct *cur, **p; 2331f0aa6617STejun Heo 23320eb68437SKefeng Wang BUG_ON(vmap_initialized); 2333c0c0a293STejun Heo 23340eb68437SKefeng Wang for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { 23350eb68437SKefeng Wang if ((unsigned long)cur->addr - addr >= vm->size) 23360eb68437SKefeng Wang break; 23370eb68437SKefeng Wang addr = ALIGN((unsigned long)cur->addr + cur->size, align); 23380eb68437SKefeng Wang } 23390eb68437SKefeng Wang 23400eb68437SKefeng Wang BUG_ON(addr > VMALLOC_END - vm->size); 2341c0c0a293STejun Heo vm->addr = (void *)addr; 23420eb68437SKefeng Wang vm->next = *p; 23430eb68437SKefeng Wang *p = vm; 23443252b1d8SKefeng Wang kasan_populate_early_vm_area_shadow(vm->addr, vm->size); 2345f0aa6617STejun Heo } 2346f0aa6617STejun Heo 234768ad4a33SUladzislau Rezki (Sony) static void vmap_init_free_space(void) 234868ad4a33SUladzislau Rezki (Sony) { 234968ad4a33SUladzislau Rezki (Sony) unsigned long vmap_start = 1; 235068ad4a33SUladzislau Rezki (Sony) const unsigned long vmap_end = ULONG_MAX; 235168ad4a33SUladzislau Rezki (Sony) struct vmap_area *busy, *free; 235268ad4a33SUladzislau Rezki (Sony) 235368ad4a33SUladzislau Rezki (Sony) /* 235468ad4a33SUladzislau Rezki (Sony) * B F B B B F 235568ad4a33SUladzislau Rezki (Sony) * -|-----|.....|-----|-----|-----|.....|- 235668ad4a33SUladzislau Rezki (Sony) * | The KVA space | 235768ad4a33SUladzislau Rezki (Sony) * |<--------------------------------->| 235868ad4a33SUladzislau Rezki (Sony) */ 235968ad4a33SUladzislau Rezki (Sony) list_for_each_entry(busy, &vmap_area_list, list) { 236068ad4a33SUladzislau Rezki (Sony) if (busy->va_start - vmap_start > 0) { 236168ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, 
GFP_NOWAIT); 236268ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 236368ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 236468ad4a33SUladzislau Rezki (Sony) free->va_end = busy->va_start; 236568ad4a33SUladzislau Rezki (Sony) 236668ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 236768ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 236868ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 236968ad4a33SUladzislau Rezki (Sony) } 237068ad4a33SUladzislau Rezki (Sony) } 237168ad4a33SUladzislau Rezki (Sony) 237268ad4a33SUladzislau Rezki (Sony) vmap_start = busy->va_end; 237368ad4a33SUladzislau Rezki (Sony) } 237468ad4a33SUladzislau Rezki (Sony) 237568ad4a33SUladzislau Rezki (Sony) if (vmap_end - vmap_start > 0) { 237668ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 237768ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 237868ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 237968ad4a33SUladzislau Rezki (Sony) free->va_end = vmap_end; 238068ad4a33SUladzislau Rezki (Sony) 238168ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 238268ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 238368ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 238468ad4a33SUladzislau Rezki (Sony) } 238568ad4a33SUladzislau Rezki (Sony) } 238668ad4a33SUladzislau Rezki (Sony) } 238768ad4a33SUladzislau Rezki (Sony) 2388db64fe02SNick Piggin void __init vmalloc_init(void) 2389db64fe02SNick Piggin { 2390822c18f2SIvan Kokshaysky struct vmap_area *va; 2391822c18f2SIvan Kokshaysky struct vm_struct *tmp; 2392db64fe02SNick Piggin int i; 2393db64fe02SNick Piggin 239468ad4a33SUladzislau Rezki (Sony) /* 239568ad4a33SUladzislau Rezki (Sony) * Create the cache for vmap_area objects. 239668ad4a33SUladzislau Rezki (Sony) */ 239768ad4a33SUladzislau Rezki (Sony) vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 239868ad4a33SUladzislau Rezki (Sony) 2399db64fe02SNick Piggin for_each_possible_cpu(i) { 2400db64fe02SNick Piggin struct vmap_block_queue *vbq; 240132fcfd40SAl Viro struct vfree_deferred *p; 2402db64fe02SNick Piggin 2403db64fe02SNick Piggin vbq = &per_cpu(vmap_block_queue, i); 2404db64fe02SNick Piggin spin_lock_init(&vbq->lock); 2405db64fe02SNick Piggin INIT_LIST_HEAD(&vbq->free); 240632fcfd40SAl Viro p = &per_cpu(vfree_deferred, i); 240732fcfd40SAl Viro init_llist_head(&p->list); 240832fcfd40SAl Viro INIT_WORK(&p->wq, free_work); 2409db64fe02SNick Piggin } 24109b463334SJeremy Fitzhardinge 2411822c18f2SIvan Kokshaysky /* Import existing vmlist entries. */ 2412822c18f2SIvan Kokshaysky for (tmp = vmlist; tmp; tmp = tmp->next) { 241368ad4a33SUladzislau Rezki (Sony) va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 241468ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!va)) 241568ad4a33SUladzislau Rezki (Sony) continue; 241668ad4a33SUladzislau Rezki (Sony) 2417822c18f2SIvan Kokshaysky va->va_start = (unsigned long)tmp->addr; 2418822c18f2SIvan Kokshaysky va->va_end = va->va_start + tmp->size; 2419dbda591dSKyongHo va->vm = tmp; 242068ad4a33SUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 2421822c18f2SIvan Kokshaysky } 2422ca23e405STejun Heo 242368ad4a33SUladzislau Rezki (Sony) /* 242468ad4a33SUladzislau Rezki (Sony) * Now we can initialize a free vmap space. 
242568ad4a33SUladzislau Rezki (Sony) */ 242668ad4a33SUladzislau Rezki (Sony) vmap_init_free_space(); 24279b463334SJeremy Fitzhardinge vmap_initialized = true; 2428db64fe02SNick Piggin } 2429db64fe02SNick Piggin 2430e36176beSUladzislau Rezki (Sony) static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, 2431e36176beSUladzislau Rezki (Sony) struct vmap_area *va, unsigned long flags, const void *caller) 2432cf88c790STejun Heo { 2433cf88c790STejun Heo vm->flags = flags; 2434cf88c790STejun Heo vm->addr = (void *)va->va_start; 2435cf88c790STejun Heo vm->size = va->va_end - va->va_start; 2436cf88c790STejun Heo vm->caller = caller; 2437db1aecafSMinchan Kim va->vm = vm; 2438e36176beSUladzislau Rezki (Sony) } 2439e36176beSUladzislau Rezki (Sony) 2440e36176beSUladzislau Rezki (Sony) static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 2441e36176beSUladzislau Rezki (Sony) unsigned long flags, const void *caller) 2442e36176beSUladzislau Rezki (Sony) { 2443e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 2444e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vm, va, flags, caller); 2445c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 2446f5252e00SMitsuo Hayasaka } 2447cf88c790STejun Heo 244820fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm) 2449f5252e00SMitsuo Hayasaka { 2450d4033afdSJoonsoo Kim /* 245120fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED, 2452d4033afdSJoonsoo Kim * we should make sure that vm has proper values. 2453d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info(). 2454d4033afdSJoonsoo Kim */ 2455d4033afdSJoonsoo Kim smp_wmb(); 245620fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED; 2457cf88c790STejun Heo } 2458cf88c790STejun Heo 2459db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size, 24607ca3027bSDaniel Axtens unsigned long align, unsigned long shift, unsigned long flags, 24617ca3027bSDaniel Axtens unsigned long start, unsigned long end, int node, 24627ca3027bSDaniel Axtens gfp_t gfp_mask, const void *caller) 2463db64fe02SNick Piggin { 24640006526dSKautuk Consul struct vmap_area *va; 2465db64fe02SNick Piggin struct vm_struct *area; 2466d98c9e83SAndrey Ryabinin unsigned long requested_size = size; 24671da177e4SLinus Torvalds 246852fd24caSGiridhar Pemmasani BUG_ON(in_interrupt()); 24697ca3027bSDaniel Axtens size = ALIGN(size, 1ul << shift); 247031be8309SOGAWA Hirofumi if (unlikely(!size)) 247131be8309SOGAWA Hirofumi return NULL; 24721da177e4SLinus Torvalds 2473252e5c6eSzijun_hu if (flags & VM_IOREMAP) 2474252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size), 2475252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER); 2476252e5c6eSzijun_hu 2477cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 24781da177e4SLinus Torvalds if (unlikely(!area)) 24791da177e4SLinus Torvalds return NULL; 24801da177e4SLinus Torvalds 248171394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD)) 24821da177e4SLinus Torvalds size += PAGE_SIZE; 24831da177e4SLinus Torvalds 2484db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask); 2485db64fe02SNick Piggin if (IS_ERR(va)) { 2486db64fe02SNick Piggin kfree(area); 2487db64fe02SNick Piggin return NULL; 24881da177e4SLinus Torvalds } 24891da177e4SLinus Torvalds 2490d98c9e83SAndrey Ryabinin setup_vmalloc_vm(area, va, flags, caller); 24913c5c3cfbSDaniel Axtens 249219f1c3acSAndrey Konovalov /* 249319f1c3acSAndrey Konovalov * Mark pages for non-VM_ALLOC mappings as 
accessible. Do it now as a 249419f1c3acSAndrey Konovalov * best-effort approach, as they can be mapped outside of vmalloc code. 249519f1c3acSAndrey Konovalov * For VM_ALLOC mappings, the pages are marked as accessible after 249619f1c3acSAndrey Konovalov * getting mapped in __vmalloc_node_range(). 249723689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for 249823689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 249919f1c3acSAndrey Konovalov */ 250019f1c3acSAndrey Konovalov if (!(flags & VM_ALLOC)) 250123689e91SAndrey Konovalov area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, 2502f6e39794SAndrey Konovalov KASAN_VMALLOC_PROT_NORMAL); 25031d96320fSAndrey Konovalov 25041da177e4SLinus Torvalds return area; 25051da177e4SLinus Torvalds } 25061da177e4SLinus Torvalds 2507c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 2508c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end, 25095e6cafc8SMarek Szyprowski const void *caller) 2510c2968612SBenjamin Herrenschmidt { 25117ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, 25127ca3027bSDaniel Axtens NUMA_NO_NODE, GFP_KERNEL, caller); 2513c2968612SBenjamin Herrenschmidt } 2514c2968612SBenjamin Herrenschmidt 25151da177e4SLinus Torvalds /** 2516183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area 25171da177e4SLinus Torvalds * @size: size of the area 25181da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 25191da177e4SLinus Torvalds * 25201da177e4SLinus Torvalds * Search an area of @size in the kernel virtual mapping area, 25211da177e4SLinus Torvalds * and reserve it for our purposes. Returns the area descriptor 25221da177e4SLinus Torvalds * on success or %NULL on failure. 2523a862f68aSMike Rapoport * 2524a862f68aSMike Rapoport * Return: the area descriptor on success or %NULL on failure. 25251da177e4SLinus Torvalds */ 25261da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 25271da177e4SLinus Torvalds { 25287ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 25297ca3027bSDaniel Axtens VMALLOC_START, VMALLOC_END, 253000ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, 253100ef2d2fSDavid Rientjes __builtin_return_address(0)); 253223016969SChristoph Lameter } 253323016969SChristoph Lameter 253423016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 25355e6cafc8SMarek Szyprowski const void *caller) 253623016969SChristoph Lameter { 25377ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 25387ca3027bSDaniel Axtens VMALLOC_START, VMALLOC_END, 253900ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, caller); 25401da177e4SLinus Torvalds } 25411da177e4SLinus Torvalds 2542e9da6e99SMarek Szyprowski /** 2543e9da6e99SMarek Szyprowski * find_vm_area - find a continuous kernel virtual area 2544e9da6e99SMarek Szyprowski * @addr: base address 2545e9da6e99SMarek Szyprowski * 2546e9da6e99SMarek Szyprowski * Search for the kernel VM area starting at @addr, and return it. 2547e9da6e99SMarek Szyprowski * It is up to the caller to do all required locking to keep the returned 2548e9da6e99SMarek Szyprowski * pointer valid. 2549a862f68aSMike Rapoport * 255074640617SHui Su * Return: the area descriptor on success or %NULL on failure. 
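 *
 * Illustrative sketch (the pointer checked below is assumed to come from
 * vmalloc() or a related vmalloc-space mapping):
 *
 *	struct vm_struct *area = find_vm_area(ptr);
 *
 *	if (area)
 *		pr_debug("%p sits in a %lu byte vm area\n", ptr, area->size);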
2551e9da6e99SMarek Szyprowski */ 2552e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr) 255383342314SNick Piggin { 2554db64fe02SNick Piggin struct vmap_area *va; 255583342314SNick Piggin 2556db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 2557688fcbfcSPengfei Li if (!va) 25587856dfebSAndi Kleen return NULL; 2559688fcbfcSPengfei Li 2560688fcbfcSPengfei Li return va->vm; 25617856dfebSAndi Kleen } 25627856dfebSAndi Kleen 25631da177e4SLinus Torvalds /** 2564183ff22bSSimon Arlott * remove_vm_area - find and remove a continuous kernel virtual area 25651da177e4SLinus Torvalds * @addr: base address 25661da177e4SLinus Torvalds * 25671da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it. 25681da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe 25697856dfebSAndi Kleen * on SMP machines, except for its size or flags. 2570a862f68aSMike Rapoport * 257174640617SHui Su * Return: the area descriptor on success or %NULL on failure. 25721da177e4SLinus Torvalds */ 2573b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 25741da177e4SLinus Torvalds { 2575db64fe02SNick Piggin struct vmap_area *va; 2576db64fe02SNick Piggin 25775803ed29SChristoph Hellwig might_sleep(); 25785803ed29SChristoph Hellwig 2579dd3b8353SUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 2580*899c6efeSUladzislau Rezki (Sony) va = __find_vmap_area((unsigned long)addr, &vmap_area_root); 2581688fcbfcSPengfei Li if (va && va->vm) { 2582db1aecafSMinchan Kim struct vm_struct *vm = va->vm; 2583f5252e00SMitsuo Hayasaka 2584c69480adSJoonsoo Kim va->vm = NULL; 2585c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 2586c69480adSJoonsoo Kim 258763840de2SAndrey Konovalov kasan_free_module_shadow(vm); 2588dd32c279SKAMEZAWA Hiroyuki free_unmap_vmap_area(va); 2589dd32c279SKAMEZAWA Hiroyuki 2590db64fe02SNick Piggin return vm; 2591db64fe02SNick Piggin } 2592dd3b8353SUladzislau Rezki (Sony) 2593dd3b8353SUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 2594db64fe02SNick Piggin return NULL; 25951da177e4SLinus Torvalds } 25961da177e4SLinus Torvalds 2597868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area, 2598868b104dSRick Edgecombe int (*set_direct_map)(struct page *page)) 2599868b104dSRick Edgecombe { 2600868b104dSRick Edgecombe int i; 2601868b104dSRick Edgecombe 2602121e6f32SNicholas Piggin /* HUGE_VMALLOC passes small pages to set_direct_map */ 2603868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) 2604868b104dSRick Edgecombe if (page_address(area->pages[i])) 2605868b104dSRick Edgecombe set_direct_map(area->pages[i]); 2606868b104dSRick Edgecombe } 2607868b104dSRick Edgecombe 2608868b104dSRick Edgecombe /* Handle removing and resetting vm mappings related to the vm_struct. */ 2609868b104dSRick Edgecombe static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 2610868b104dSRick Edgecombe { 2611868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 2612121e6f32SNicholas Piggin unsigned int page_order = vm_area_page_order(area); 2613868b104dSRick Edgecombe int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 261431e67340SRick Edgecombe int flush_dmap = 0; 2615868b104dSRick Edgecombe int i; 2616868b104dSRick Edgecombe 2617868b104dSRick Edgecombe remove_vm_area(area->addr); 2618868b104dSRick Edgecombe 2619868b104dSRick Edgecombe /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. 
*/ 2620868b104dSRick Edgecombe if (!flush_reset) 2621868b104dSRick Edgecombe return; 2622868b104dSRick Edgecombe 2623868b104dSRick Edgecombe /* 2624868b104dSRick Edgecombe * If not deallocating pages, just do the flush of the VM area and 2625868b104dSRick Edgecombe * return. 2626868b104dSRick Edgecombe */ 2627868b104dSRick Edgecombe if (!deallocate_pages) { 2628868b104dSRick Edgecombe vm_unmap_aliases(); 2629868b104dSRick Edgecombe return; 2630868b104dSRick Edgecombe } 2631868b104dSRick Edgecombe 2632868b104dSRick Edgecombe /* 2633868b104dSRick Edgecombe * If execution gets here, flush the vm mapping and reset the direct 2634868b104dSRick Edgecombe * map. Find the start and end range of the direct mappings to make sure 2635868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map. 2636868b104dSRick Edgecombe */ 2637121e6f32SNicholas Piggin for (i = 0; i < area->nr_pages; i += 1U << page_order) { 26388e41f872SRick Edgecombe unsigned long addr = (unsigned long)page_address(area->pages[i]); 26398e41f872SRick Edgecombe if (addr) { 2640121e6f32SNicholas Piggin unsigned long page_size; 2641121e6f32SNicholas Piggin 2642121e6f32SNicholas Piggin page_size = PAGE_SIZE << page_order; 2643868b104dSRick Edgecombe start = min(addr, start); 2644121e6f32SNicholas Piggin end = max(addr + page_size, end); 264531e67340SRick Edgecombe flush_dmap = 1; 2646868b104dSRick Edgecombe } 2647868b104dSRick Edgecombe } 2648868b104dSRick Edgecombe 2649868b104dSRick Edgecombe /* 2650868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if 2651868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and 2652868b104dSRick Edgecombe * reset the direct map permissions to the default. 2653868b104dSRick Edgecombe */ 2654868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush); 265531e67340SRick Edgecombe _vm_unmap_aliases(start, end, flush_dmap); 2656868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush); 2657868b104dSRick Edgecombe } 2658868b104dSRick Edgecombe 2659b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 26601da177e4SLinus Torvalds { 26611da177e4SLinus Torvalds struct vm_struct *area; 26621da177e4SLinus Torvalds 26631da177e4SLinus Torvalds if (!addr) 26641da177e4SLinus Torvalds return; 26651da177e4SLinus Torvalds 2666e69e9d4aSHATAYAMA Daisuke if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 2667ab15d9b4SDan Carpenter addr)) 26681da177e4SLinus Torvalds return; 26691da177e4SLinus Torvalds 26706ade2032SLiviu Dudau area = find_vm_area(addr); 26711da177e4SLinus Torvalds if (unlikely(!area)) { 26724c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 26731da177e4SLinus Torvalds addr); 26741da177e4SLinus Torvalds return; 26751da177e4SLinus Torvalds } 26761da177e4SLinus Torvalds 267705e3ff95SChintan Pandya debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); 267805e3ff95SChintan Pandya debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); 26799a11b49aSIngo Molnar 2680c041098cSVincenzo Frascino kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); 26813c5c3cfbSDaniel Axtens 2682868b104dSRick Edgecombe vm_remove_mappings(area, deallocate_pages); 2683868b104dSRick Edgecombe 26841da177e4SLinus Torvalds if (deallocate_pages) { 26853b8000aeSNicholas Piggin int i; 26861da177e4SLinus Torvalds 26873b8000aeSNicholas Piggin for (i = 0; i < area->nr_pages; i++) { 
2688bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 2689bf53d6f8SChristoph Lameter 2690bf53d6f8SChristoph Lameter BUG_ON(!page); 26913b8000aeSNicholas Piggin mod_memcg_page_state(page, MEMCG_VMALLOC, -1); 26923b8000aeSNicholas Piggin /* 26933b8000aeSNicholas Piggin * High-order allocs for huge vmallocs are split, so 26943b8000aeSNicholas Piggin * can be freed as an array of order-0 allocations 26953b8000aeSNicholas Piggin */ 26963b8000aeSNicholas Piggin __free_pages(page, 0); 2697a850e932SRafael Aquini cond_resched(); 26981da177e4SLinus Torvalds } 269997105f0aSRoman Gushchin atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); 27001da177e4SLinus Torvalds 2701244d63eeSDavid Rientjes kvfree(area->pages); 27021da177e4SLinus Torvalds } 27031da177e4SLinus Torvalds 27041da177e4SLinus Torvalds kfree(area); 27051da177e4SLinus Torvalds } 27061da177e4SLinus Torvalds 2707bf22e37aSAndrey Ryabinin static inline void __vfree_deferred(const void *addr) 2708bf22e37aSAndrey Ryabinin { 2709bf22e37aSAndrey Ryabinin /* 2710bf22e37aSAndrey Ryabinin * Use raw_cpu_ptr() because this can be called from preemptible 2711bf22e37aSAndrey Ryabinin * context. Preemption is absolutely fine here, because the llist_add() 2712bf22e37aSAndrey Ryabinin * implementation is lockless, so it works even if we are adding to 271373221d88SJeongtae Park * another cpu's list. schedule_work() should be fine with this too. 2714bf22e37aSAndrey Ryabinin */ 2715bf22e37aSAndrey Ryabinin struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 2716bf22e37aSAndrey Ryabinin 2717bf22e37aSAndrey Ryabinin if (llist_add((struct llist_node *)addr, &p->list)) 2718bf22e37aSAndrey Ryabinin schedule_work(&p->wq); 2719bf22e37aSAndrey Ryabinin } 2720bf22e37aSAndrey Ryabinin 2721bf22e37aSAndrey Ryabinin /** 2722bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc() 2723bf22e37aSAndrey Ryabinin * @addr: memory base address 2724bf22e37aSAndrey Ryabinin * 2725bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context 2726bf22e37aSAndrey Ryabinin * except NMIs. 2727bf22e37aSAndrey Ryabinin */ 2728bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr) 2729bf22e37aSAndrey Ryabinin { 2730bf22e37aSAndrey Ryabinin BUG_ON(in_nmi()); 2731bf22e37aSAndrey Ryabinin 2732bf22e37aSAndrey Ryabinin kmemleak_free(addr); 2733bf22e37aSAndrey Ryabinin 2734bf22e37aSAndrey Ryabinin if (!addr) 2735bf22e37aSAndrey Ryabinin return; 2736bf22e37aSAndrey Ryabinin __vfree_deferred(addr); 2737bf22e37aSAndrey Ryabinin } 2738bf22e37aSAndrey Ryabinin 2739c67dc624SRoman Penyaev static void __vfree(const void *addr) 2740c67dc624SRoman Penyaev { 2741c67dc624SRoman Penyaev if (unlikely(in_interrupt())) 2742c67dc624SRoman Penyaev __vfree_deferred(addr); 2743c67dc624SRoman Penyaev else 2744c67dc624SRoman Penyaev __vunmap(addr, 1); 2745c67dc624SRoman Penyaev } 2746c67dc624SRoman Penyaev 27471da177e4SLinus Torvalds /** 2748fa307474SMatthew Wilcox (Oracle) * vfree - Release memory allocated by vmalloc() 2749fa307474SMatthew Wilcox (Oracle) * @addr: Memory base address 27501da177e4SLinus Torvalds * 2751fa307474SMatthew Wilcox (Oracle) * Free the virtually continuous memory area starting at @addr, as obtained 2752fa307474SMatthew Wilcox (Oracle) * from one of the vmalloc() family of APIs. 
This will usually also free the 2753fa307474SMatthew Wilcox (Oracle) * physical memory underlying the virtual allocation, but that memory is 2754fa307474SMatthew Wilcox (Oracle) * reference counted, so it will not be freed until the last user goes away. 27551da177e4SLinus Torvalds * 2756fa307474SMatthew Wilcox (Oracle) * If @addr is NULL, no operation is performed. 275732fcfd40SAl Viro * 2758fa307474SMatthew Wilcox (Oracle) * Context: 27593ca4ea3aSAndrey Ryabinin * May sleep if called *not* from interrupt context. 2760fa307474SMatthew Wilcox (Oracle) * Must not be called in NMI context (strictly speaking, it could be 2761fa307474SMatthew Wilcox (Oracle) * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 2762f0953a1bSIngo Molnar * conventions for vfree() arch-dependent would be a really bad idea). 27631da177e4SLinus Torvalds */ 2764b3bdda02SChristoph Lameter void vfree(const void *addr) 27651da177e4SLinus Torvalds { 276632fcfd40SAl Viro BUG_ON(in_nmi()); 276789219d37SCatalin Marinas 276889219d37SCatalin Marinas kmemleak_free(addr); 276989219d37SCatalin Marinas 2770a8dda165SAndrey Ryabinin might_sleep_if(!in_interrupt()); 2771a8dda165SAndrey Ryabinin 277232fcfd40SAl Viro if (!addr) 277332fcfd40SAl Viro return; 2774c67dc624SRoman Penyaev 2775c67dc624SRoman Penyaev __vfree(addr); 27761da177e4SLinus Torvalds } 27771da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 27781da177e4SLinus Torvalds 27791da177e4SLinus Torvalds /** 27801da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 27811da177e4SLinus Torvalds * @addr: memory base address 27821da177e4SLinus Torvalds * 27831da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 27841da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 27851da177e4SLinus Torvalds * 278680e93effSPekka Enberg * Must not be called in interrupt context. 27871da177e4SLinus Torvalds */ 2788b3bdda02SChristoph Lameter void vunmap(const void *addr) 27891da177e4SLinus Torvalds { 27901da177e4SLinus Torvalds BUG_ON(in_interrupt()); 279134754b69SPeter Zijlstra might_sleep(); 279232fcfd40SAl Viro if (addr) 27931da177e4SLinus Torvalds __vunmap(addr, 0); 27941da177e4SLinus Torvalds } 27951da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 27961da177e4SLinus Torvalds 27971da177e4SLinus Torvalds /** 27981da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 27991da177e4SLinus Torvalds * @pages: array of page pointers 28001da177e4SLinus Torvalds * @count: number of pages to map 28011da177e4SLinus Torvalds * @flags: vm_area->flags 28021da177e4SLinus Torvalds * @prot: page protection for the mapping 28031da177e4SLinus Torvalds * 2804b944afc9SChristoph Hellwig * Maps @count pages from @pages into contiguous kernel virtual space. 2805b944afc9SChristoph Hellwig * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself 2806b944afc9SChristoph Hellwig * (which must be kmalloc or vmalloc memory) and one reference per pages in it 2807b944afc9SChristoph Hellwig * are transferred from the caller to vmap(), and will be freed / dropped when 2808b944afc9SChristoph Hellwig * vfree() is called on the return value. 
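 *
 * A short sketch of the plain (non-%VM_MAP_PUT_PAGES) case, assuming the
 * caller already allocated @pages and handles allocation failures elsewhere:
 *
 *	void *va = vmap(pages, count, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		memset(va, 0, count * PAGE_SIZE);
 *		vunmap(va);
 *	}
 *
 * In that plain case the caller keeps ownership of @pages and must release
 * them itself after vunmap().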
2809a862f68aSMike Rapoport * 2810a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 28111da177e4SLinus Torvalds */ 28121da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 28131da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 28141da177e4SLinus Torvalds { 28151da177e4SLinus Torvalds struct vm_struct *area; 2816b67177ecSNicholas Piggin unsigned long addr; 281765ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */ 28181da177e4SLinus Torvalds 281934754b69SPeter Zijlstra might_sleep(); 282034754b69SPeter Zijlstra 2821bd1a8fb2SPeter Zijlstra /* 2822bd1a8fb2SPeter Zijlstra * Your top guard is someone else's bottom guard. Not having a top 2823bd1a8fb2SPeter Zijlstra * guard compromises someone else's mappings too. 2824bd1a8fb2SPeter Zijlstra */ 2825bd1a8fb2SPeter Zijlstra if (WARN_ON_ONCE(flags & VM_NO_GUARD)) 2826bd1a8fb2SPeter Zijlstra flags &= ~VM_NO_GUARD; 2827bd1a8fb2SPeter Zijlstra 2828ca79b0c2SArun KS if (count > totalram_pages()) 28291da177e4SLinus Torvalds return NULL; 28301da177e4SLinus Torvalds 283165ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT; 283265ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 28331da177e4SLinus Torvalds if (!area) 28341da177e4SLinus Torvalds return NULL; 283523016969SChristoph Lameter 2836b67177ecSNicholas Piggin addr = (unsigned long)area->addr; 2837b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), 2838b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) { 28391da177e4SLinus Torvalds vunmap(area->addr); 28401da177e4SLinus Torvalds return NULL; 28411da177e4SLinus Torvalds } 28421da177e4SLinus Torvalds 2843c22ee528SMiaohe Lin if (flags & VM_MAP_PUT_PAGES) { 2844b944afc9SChristoph Hellwig area->pages = pages; 2845c22ee528SMiaohe Lin area->nr_pages = count; 2846c22ee528SMiaohe Lin } 28471da177e4SLinus Torvalds return area->addr; 28481da177e4SLinus Torvalds } 28491da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 28501da177e4SLinus Torvalds 28513e9a9e25SChristoph Hellwig #ifdef CONFIG_VMAP_PFN 28523e9a9e25SChristoph Hellwig struct vmap_pfn_data { 28533e9a9e25SChristoph Hellwig unsigned long *pfns; 28543e9a9e25SChristoph Hellwig pgprot_t prot; 28553e9a9e25SChristoph Hellwig unsigned int idx; 28563e9a9e25SChristoph Hellwig }; 28573e9a9e25SChristoph Hellwig 28583e9a9e25SChristoph Hellwig static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) 28593e9a9e25SChristoph Hellwig { 28603e9a9e25SChristoph Hellwig struct vmap_pfn_data *data = private; 28613e9a9e25SChristoph Hellwig 28623e9a9e25SChristoph Hellwig if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx]))) 28633e9a9e25SChristoph Hellwig return -EINVAL; 28643e9a9e25SChristoph Hellwig *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot)); 28653e9a9e25SChristoph Hellwig return 0; 28663e9a9e25SChristoph Hellwig } 28673e9a9e25SChristoph Hellwig 28683e9a9e25SChristoph Hellwig /** 28693e9a9e25SChristoph Hellwig * vmap_pfn - map an array of PFNs into virtually contiguous space 28703e9a9e25SChristoph Hellwig * @pfns: array of PFNs 28713e9a9e25SChristoph Hellwig * @count: number of pages to map 28723e9a9e25SChristoph Hellwig * @prot: page protection for the mapping 28733e9a9e25SChristoph Hellwig * 28743e9a9e25SChristoph Hellwig * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns 28753e9a9e25SChristoph Hellwig * the start address of the mapping. 
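 *
 * Illustrative sketch (the pfn array, its length and the uncached protection
 * are assumptions about a typical caller mapping memory that has no struct
 * pages behind it):
 *
 *	void *va = vmap_pfn(pfns, npfns, pgprot_noncached(PAGE_KERNEL));
 *
 *	if (va)
 *		vunmap(va);
 *
 * Since no struct pages back the mapping, tearing it down with vunmap()
 * only removes the virtual mapping.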
28763e9a9e25SChristoph Hellwig */ 28773e9a9e25SChristoph Hellwig void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) 28783e9a9e25SChristoph Hellwig { 28793e9a9e25SChristoph Hellwig struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; 28803e9a9e25SChristoph Hellwig struct vm_struct *area; 28813e9a9e25SChristoph Hellwig 28823e9a9e25SChristoph Hellwig area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, 28833e9a9e25SChristoph Hellwig __builtin_return_address(0)); 28843e9a9e25SChristoph Hellwig if (!area) 28853e9a9e25SChristoph Hellwig return NULL; 28863e9a9e25SChristoph Hellwig if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 28873e9a9e25SChristoph Hellwig count * PAGE_SIZE, vmap_pfn_apply, &data)) { 28883e9a9e25SChristoph Hellwig free_vm_area(area); 28893e9a9e25SChristoph Hellwig return NULL; 28903e9a9e25SChristoph Hellwig } 28913e9a9e25SChristoph Hellwig return area->addr; 28923e9a9e25SChristoph Hellwig } 28933e9a9e25SChristoph Hellwig EXPORT_SYMBOL_GPL(vmap_pfn); 28943e9a9e25SChristoph Hellwig #endif /* CONFIG_VMAP_PFN */ 28953e9a9e25SChristoph Hellwig 289612b9f873SUladzislau Rezki static inline unsigned int 289712b9f873SUladzislau Rezki vm_area_alloc_pages(gfp_t gfp, int nid, 2898343ab817SUladzislau Rezki (Sony) unsigned int order, unsigned int nr_pages, struct page **pages) 289912b9f873SUladzislau Rezki { 290012b9f873SUladzislau Rezki unsigned int nr_allocated = 0; 2901ffb29b1cSChen Wandun struct page *page; 2902ffb29b1cSChen Wandun int i; 290312b9f873SUladzislau Rezki 290412b9f873SUladzislau Rezki /* 290512b9f873SUladzislau Rezki * For order-0 pages we make use of bulk allocator, if 290612b9f873SUladzislau Rezki * the page array is partly or not at all populated due 290712b9f873SUladzislau Rezki * to fails, fallback to a single page allocator that is 290812b9f873SUladzislau Rezki * more permissive. 290912b9f873SUladzislau Rezki */ 2910c00b6b96SChen Wandun if (!order) { 29119376130cSMichal Hocko gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL; 29129376130cSMichal Hocko 2913343ab817SUladzislau Rezki (Sony) while (nr_allocated < nr_pages) { 2914343ab817SUladzislau Rezki (Sony) unsigned int nr, nr_pages_request; 2915343ab817SUladzislau Rezki (Sony) 2916343ab817SUladzislau Rezki (Sony) /* 2917343ab817SUladzislau Rezki (Sony) * A maximum allowed request is hard-coded and is 100 2918343ab817SUladzislau Rezki (Sony) * pages per call. That is done in order to prevent a 2919343ab817SUladzislau Rezki (Sony) * long preemption off scenario in the bulk-allocator 2920343ab817SUladzislau Rezki (Sony) * so the range is [1:100]. 2921343ab817SUladzislau Rezki (Sony) */ 2922343ab817SUladzislau Rezki (Sony) nr_pages_request = min(100U, nr_pages - nr_allocated); 2923343ab817SUladzislau Rezki (Sony) 2924c00b6b96SChen Wandun /* memory allocation should consider mempolicy, we can't 2925c00b6b96SChen Wandun * wrongly use nearest node when nid == NUMA_NO_NODE, 2926c00b6b96SChen Wandun * otherwise memory may be allocated in only one node, 292798af39d5SYixuan Cao * but mempolicy wants to alloc memory by interleaving. 
2928c00b6b96SChen Wandun */ 2929c00b6b96SChen Wandun if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) 29309376130cSMichal Hocko nr = alloc_pages_bulk_array_mempolicy(bulk_gfp, 2931c00b6b96SChen Wandun nr_pages_request, 2932c00b6b96SChen Wandun pages + nr_allocated); 2933c00b6b96SChen Wandun 2934c00b6b96SChen Wandun else 29359376130cSMichal Hocko nr = alloc_pages_bulk_array_node(bulk_gfp, nid, 2936c00b6b96SChen Wandun nr_pages_request, 2937c00b6b96SChen Wandun pages + nr_allocated); 2938343ab817SUladzislau Rezki (Sony) 2939343ab817SUladzislau Rezki (Sony) nr_allocated += nr; 2940343ab817SUladzislau Rezki (Sony) cond_resched(); 2941343ab817SUladzislau Rezki (Sony) 2942343ab817SUladzislau Rezki (Sony) /* 2943343ab817SUladzislau Rezki (Sony) * If zero or only some of the pages were obtained, 2944343ab817SUladzislau Rezki (Sony) * fall back to a single page allocator. 2945343ab817SUladzislau Rezki (Sony) */ 2946343ab817SUladzislau Rezki (Sony) if (nr != nr_pages_request) 2947343ab817SUladzislau Rezki (Sony) break; 2948343ab817SUladzislau Rezki (Sony) } 29493b8000aeSNicholas Piggin } 295012b9f873SUladzislau Rezki 295112b9f873SUladzislau Rezki /* High-order pages or fallback path if "bulk" fails. */ 295212b9f873SUladzislau Rezki 2953ffb29b1cSChen Wandun while (nr_allocated < nr_pages) { 2954dd544141SVasily Averin if (fatal_signal_pending(current)) 2955dd544141SVasily Averin break; 2956dd544141SVasily Averin 2957ffb29b1cSChen Wandun if (nid == NUMA_NO_NODE) 2958ffb29b1cSChen Wandun page = alloc_pages(gfp, order); 2959ffb29b1cSChen Wandun else 296012b9f873SUladzislau Rezki page = alloc_pages_node(nid, gfp, order); 296112b9f873SUladzislau Rezki if (unlikely(!page)) 296212b9f873SUladzislau Rezki break; 29633b8000aeSNicholas Piggin /* 29643b8000aeSNicholas Piggin * Higher order allocations must be able to be treated as 29653b8000aeSNicholas Piggin * independent small pages by callers (as they can with 29663b8000aeSNicholas Piggin * small-page vmallocs). Some drivers do their own refcounting 29673b8000aeSNicholas Piggin * on vmalloc_to_page() pages, some use page->mapping, 29683b8000aeSNicholas Piggin * page->lru, etc. 29693b8000aeSNicholas Piggin */ 29703b8000aeSNicholas Piggin if (order) 29713b8000aeSNicholas Piggin split_page(page, order); 297212b9f873SUladzislau Rezki 297312b9f873SUladzislau Rezki /* 297412b9f873SUladzislau Rezki * Careful, we allocate and map page-order pages, but 297512b9f873SUladzislau Rezki * tracking is done per PAGE_SIZE page so as to keep the 297612b9f873SUladzislau Rezki * vm_struct APIs independent of the physical/mapped size. 
297712b9f873SUladzislau Rezki */ 297812b9f873SUladzislau Rezki for (i = 0; i < (1U << order); i++) 297912b9f873SUladzislau Rezki pages[nr_allocated + i] = page + i; 298012b9f873SUladzislau Rezki 298112b9f873SUladzislau Rezki cond_resched(); 298212b9f873SUladzislau Rezki nr_allocated += 1U << order; 298312b9f873SUladzislau Rezki } 298412b9f873SUladzislau Rezki 298512b9f873SUladzislau Rezki return nr_allocated; 298612b9f873SUladzislau Rezki } 298712b9f873SUladzislau Rezki 2988e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 2989121e6f32SNicholas Piggin pgprot_t prot, unsigned int page_shift, 2990121e6f32SNicholas Piggin int node) 29911da177e4SLinus Torvalds { 2992930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 29939376130cSMichal Hocko bool nofail = gfp_mask & __GFP_NOFAIL; 2994121e6f32SNicholas Piggin unsigned long addr = (unsigned long)area->addr; 2995121e6f32SNicholas Piggin unsigned long size = get_vm_area_size(area); 299634fe6537SAndrew Morton unsigned long array_size; 2997121e6f32SNicholas Piggin unsigned int nr_small_pages = size >> PAGE_SHIFT; 2998121e6f32SNicholas Piggin unsigned int page_order; 2999451769ebSMichal Hocko unsigned int flags; 3000451769ebSMichal Hocko int ret; 30011da177e4SLinus Torvalds 3002121e6f32SNicholas Piggin array_size = (unsigned long)nr_small_pages * sizeof(struct page *); 3003f255935bSChristoph Hellwig gfp_mask |= __GFP_NOWARN; 3004f255935bSChristoph Hellwig if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) 3005f255935bSChristoph Hellwig gfp_mask |= __GFP_HIGHMEM; 30061da177e4SLinus Torvalds 30071da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. */ 30088757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 30095c1f4e69SUladzislau Rezki (Sony) area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, 3010f255935bSChristoph Hellwig area->caller); 3011286e1ea3SAndrew Morton } else { 30125c1f4e69SUladzislau Rezki (Sony) area->pages = kmalloc_node(array_size, nested_gfp, node); 3013286e1ea3SAndrew Morton } 30147ea36242SAustin Kim 30155c1f4e69SUladzislau Rezki (Sony) if (!area->pages) { 3016c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL, 3017f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, failed to allocated page array size %lu", 3018d70bec8cSNicholas Piggin nr_small_pages * PAGE_SIZE, array_size); 3019cd61413bSUladzislau Rezki (Sony) free_vm_area(area); 30201da177e4SLinus Torvalds return NULL; 30211da177e4SLinus Torvalds } 30221da177e4SLinus Torvalds 3023121e6f32SNicholas Piggin set_vm_area_page_order(area, page_shift - PAGE_SHIFT); 3024121e6f32SNicholas Piggin page_order = vm_area_page_order(area); 3025121e6f32SNicholas Piggin 3026c3d77172SUladzislau Rezki (Sony) area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, 3027c3d77172SUladzislau Rezki (Sony) node, page_order, nr_small_pages, area->pages); 30285c1f4e69SUladzislau Rezki (Sony) 302997105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 30304e5aa1f4SShakeel Butt if (gfp_mask & __GFP_ACCOUNT) { 30313b8000aeSNicholas Piggin int i; 30324e5aa1f4SShakeel Butt 30333b8000aeSNicholas Piggin for (i = 0; i < area->nr_pages; i++) 30343b8000aeSNicholas Piggin mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); 30354e5aa1f4SShakeel Butt } 30365c1f4e69SUladzislau Rezki (Sony) 30375c1f4e69SUladzislau Rezki (Sony) /* 30385c1f4e69SUladzislau Rezki (Sony) * If not enough pages were obtained to accomplish an 30395c1f4e69SUladzislau Rezki (Sony) * allocation 
request, free them via __vfree() if any. 30405c1f4e69SUladzislau Rezki (Sony) */ 30415c1f4e69SUladzislau Rezki (Sony) if (area->nr_pages != nr_small_pages) { 3042c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL, 3043f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, page order %u, failed to allocate pages", 3044d70bec8cSNicholas Piggin area->nr_pages * PAGE_SIZE, page_order); 30451da177e4SLinus Torvalds goto fail; 30461da177e4SLinus Torvalds } 3047121e6f32SNicholas Piggin 3048451769ebSMichal Hocko /* 3049451769ebSMichal Hocko * page tables allocations ignore external gfp mask, enforce it 3050451769ebSMichal Hocko * by the scope API 3051451769ebSMichal Hocko */ 3052451769ebSMichal Hocko if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3053451769ebSMichal Hocko flags = memalloc_nofs_save(); 3054451769ebSMichal Hocko else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3055451769ebSMichal Hocko flags = memalloc_noio_save(); 3056451769ebSMichal Hocko 30579376130cSMichal Hocko do { 3058451769ebSMichal Hocko ret = vmap_pages_range(addr, addr + size, prot, area->pages, 3059451769ebSMichal Hocko page_shift); 30609376130cSMichal Hocko if (nofail && (ret < 0)) 30619376130cSMichal Hocko schedule_timeout_uninterruptible(1); 30629376130cSMichal Hocko } while (nofail && (ret < 0)); 3063451769ebSMichal Hocko 3064451769ebSMichal Hocko if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3065451769ebSMichal Hocko memalloc_nofs_restore(flags); 3066451769ebSMichal Hocko else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3067451769ebSMichal Hocko memalloc_noio_restore(flags); 3068451769ebSMichal Hocko 3069451769ebSMichal Hocko if (ret < 0) { 3070c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL, 3071f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, failed to map pages", 3072d70bec8cSNicholas Piggin area->nr_pages * PAGE_SIZE); 30731da177e4SLinus Torvalds goto fail; 3074d70bec8cSNicholas Piggin } 3075ed1f324cSChristoph Hellwig 30761da177e4SLinus Torvalds return area->addr; 30771da177e4SLinus Torvalds 30781da177e4SLinus Torvalds fail: 3079c67dc624SRoman Penyaev __vfree(area->addr); 30801da177e4SLinus Torvalds return NULL; 30811da177e4SLinus Torvalds } 30821da177e4SLinus Torvalds 3083d0a21265SDavid Rientjes /** 3084d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 3085d0a21265SDavid Rientjes * @size: allocation size 3086d0a21265SDavid Rientjes * @align: desired alignment 3087d0a21265SDavid Rientjes * @start: vm area range start 3088d0a21265SDavid Rientjes * @end: vm area range end 3089d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 3090d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 3091cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 309200ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 3093d0a21265SDavid Rientjes * @caller: caller's return address 3094d0a21265SDavid Rientjes * 3095d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 3096b7d90e7aSMichal Hocko * allocator with @gfp_mask flags. Please note that the full set of gfp 309730d3f011SMichal Hocko * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all 309830d3f011SMichal Hocko * supported. 309930d3f011SMichal Hocko * Zone modifiers are not supported. 
From the reclaim modifiers 310030d3f011SMichal Hocko * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported) 310130d3f011SMichal Hocko * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and 310230d3f011SMichal Hocko * __GFP_RETRY_MAYFAIL are not supported). 310330d3f011SMichal Hocko * 310430d3f011SMichal Hocko * __GFP_NOWARN can be used to suppress failures messages. 3105b7d90e7aSMichal Hocko * 3106b7d90e7aSMichal Hocko * Map them into contiguous kernel virtual space, using a pagetable 3107b7d90e7aSMichal Hocko * protection of @prot. 3108a862f68aSMike Rapoport * 3109a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 3110d0a21265SDavid Rientjes */ 3111d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 3112d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 3113cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node, 3114cb9e3c29SAndrey Ryabinin const void *caller) 3115930fc45aSChristoph Lameter { 3116d0a21265SDavid Rientjes struct vm_struct *area; 311719f1c3acSAndrey Konovalov void *ret; 3118f6e39794SAndrey Konovalov kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; 3119d0a21265SDavid Rientjes unsigned long real_size = size; 3120121e6f32SNicholas Piggin unsigned long real_align = align; 3121121e6f32SNicholas Piggin unsigned int shift = PAGE_SHIFT; 3122d0a21265SDavid Rientjes 3123d70bec8cSNicholas Piggin if (WARN_ON_ONCE(!size)) 3124d70bec8cSNicholas Piggin return NULL; 3125d70bec8cSNicholas Piggin 3126d70bec8cSNicholas Piggin if ((size >> PAGE_SHIFT) > totalram_pages()) { 3127d70bec8cSNicholas Piggin warn_alloc(gfp_mask, NULL, 3128f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, exceeds total pages", 3129f4bdfeafSUladzislau Rezki (Sony) real_size); 3130d70bec8cSNicholas Piggin return NULL; 3131121e6f32SNicholas Piggin } 3132d0a21265SDavid Rientjes 3133559089e0SSong Liu if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { 3134121e6f32SNicholas Piggin unsigned long size_per_node; 3135121e6f32SNicholas Piggin 3136121e6f32SNicholas Piggin /* 3137121e6f32SNicholas Piggin * Try huge pages. Only try for PAGE_KERNEL allocations, 3138121e6f32SNicholas Piggin * others like modules don't yet expect huge pages in 3139121e6f32SNicholas Piggin * their allocations due to apply_to_page_range not 3140121e6f32SNicholas Piggin * supporting them. 
3141121e6f32SNicholas Piggin */ 3142121e6f32SNicholas Piggin 3143121e6f32SNicholas Piggin size_per_node = size; 3144121e6f32SNicholas Piggin if (node == NUMA_NO_NODE) 3145121e6f32SNicholas Piggin size_per_node /= num_online_nodes(); 31463382bbeeSChristophe Leroy if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE) 3147121e6f32SNicholas Piggin shift = PMD_SHIFT; 31483382bbeeSChristophe Leroy else 31493382bbeeSChristophe Leroy shift = arch_vmap_pte_supported_shift(size_per_node); 31503382bbeeSChristophe Leroy 3151121e6f32SNicholas Piggin align = max(real_align, 1UL << shift); 3152121e6f32SNicholas Piggin size = ALIGN(real_size, 1UL << shift); 3153121e6f32SNicholas Piggin } 3154121e6f32SNicholas Piggin 3155121e6f32SNicholas Piggin again: 31567ca3027bSDaniel Axtens area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | 31577ca3027bSDaniel Axtens VM_UNINITIALIZED | vm_flags, start, end, node, 31587ca3027bSDaniel Axtens gfp_mask, caller); 3159d70bec8cSNicholas Piggin if (!area) { 31609376130cSMichal Hocko bool nofail = gfp_mask & __GFP_NOFAIL; 3161d70bec8cSNicholas Piggin warn_alloc(gfp_mask, NULL, 31629376130cSMichal Hocko "vmalloc error: size %lu, vm_struct allocation failed%s", 31639376130cSMichal Hocko real_size, (nofail) ? ". Retrying." : ""); 31649376130cSMichal Hocko if (nofail) { 31659376130cSMichal Hocko schedule_timeout_uninterruptible(1); 31669376130cSMichal Hocko goto again; 31679376130cSMichal Hocko } 3168de7d2b56SJoe Perches goto fail; 3169d70bec8cSNicholas Piggin } 3170d0a21265SDavid Rientjes 3171f6e39794SAndrey Konovalov /* 3172f6e39794SAndrey Konovalov * Prepare arguments for __vmalloc_area_node() and 3173f6e39794SAndrey Konovalov * kasan_unpoison_vmalloc(). 3174f6e39794SAndrey Konovalov */ 3175f6e39794SAndrey Konovalov if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { 3176f6e39794SAndrey Konovalov if (kasan_hw_tags_enabled()) { 317701d92c7fSAndrey Konovalov /* 317801d92c7fSAndrey Konovalov * Modify protection bits to allow tagging. 3179f6e39794SAndrey Konovalov * This must be done before mapping. 318001d92c7fSAndrey Konovalov */ 318101d92c7fSAndrey Konovalov prot = arch_vmap_pgprot_tagged(prot); 318201d92c7fSAndrey Konovalov 318323689e91SAndrey Konovalov /* 3184f6e39794SAndrey Konovalov * Skip page_alloc poisoning and zeroing for physical 3185f6e39794SAndrey Konovalov * pages backing VM_ALLOC mapping. Memory is instead 3186f6e39794SAndrey Konovalov * poisoned and zeroed by kasan_unpoison_vmalloc(). 318723689e91SAndrey Konovalov */ 318823689e91SAndrey Konovalov gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO; 318923689e91SAndrey Konovalov } 319023689e91SAndrey Konovalov 3191f6e39794SAndrey Konovalov /* Take note that the mapping is PAGE_KERNEL. */ 3192f6e39794SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; 3193f6e39794SAndrey Konovalov } 3194f6e39794SAndrey Konovalov 319501d92c7fSAndrey Konovalov /* Allocate physical pages and map them into vmalloc space. */ 319619f1c3acSAndrey Konovalov ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); 319719f1c3acSAndrey Konovalov if (!ret) 3198121e6f32SNicholas Piggin goto fail; 319989219d37SCatalin Marinas 320023689e91SAndrey Konovalov /* 320123689e91SAndrey Konovalov * Mark the pages as accessible, now that they are mapped. 
32026c2f761dSAndrey Konovalov * The condition for setting KASAN_VMALLOC_INIT should complement the 32036c2f761dSAndrey Konovalov * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check 32046c2f761dSAndrey Konovalov * to make sure that memory is initialized under the same conditions. 3205f6e39794SAndrey Konovalov * Tag-based KASAN modes only assign tags to normal non-executable 3206f6e39794SAndrey Konovalov * allocations, see __kasan_unpoison_vmalloc(). 320723689e91SAndrey Konovalov */ 3208f6e39794SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_VM_ALLOC; 32096c2f761dSAndrey Konovalov if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && 32106c2f761dSAndrey Konovalov (gfp_mask & __GFP_SKIP_ZERO)) 321123689e91SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_INIT; 3212f6e39794SAndrey Konovalov /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ 321323689e91SAndrey Konovalov area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); 321419f1c3acSAndrey Konovalov 321589219d37SCatalin Marinas /* 321620fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED 321720fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized. 32184341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here. 3219f5252e00SMitsuo Hayasaka */ 322020fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area); 3221f5252e00SMitsuo Hayasaka 32227ca3027bSDaniel Axtens size = PAGE_ALIGN(size); 322360115fa5SKefeng Wang if (!(vm_flags & VM_DEFER_KMEMLEAK)) 322494f4a161SCatalin Marinas kmemleak_vmalloc(area, size, gfp_mask); 322589219d37SCatalin Marinas 322619f1c3acSAndrey Konovalov return area->addr; 3227de7d2b56SJoe Perches 3228de7d2b56SJoe Perches fail: 3229121e6f32SNicholas Piggin if (shift > PAGE_SHIFT) { 3230121e6f32SNicholas Piggin shift = PAGE_SHIFT; 3231121e6f32SNicholas Piggin align = real_align; 3232121e6f32SNicholas Piggin size = real_size; 3233121e6f32SNicholas Piggin goto again; 3234121e6f32SNicholas Piggin } 3235121e6f32SNicholas Piggin 3236de7d2b56SJoe Perches return NULL; 3237930fc45aSChristoph Lameter } 3238930fc45aSChristoph Lameter 32391da177e4SLinus Torvalds /** 3240930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 32411da177e4SLinus Torvalds * @size: allocation size 32422dca6999SDavid Miller * @align: desired alignment 32431da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 324400ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 3245c85d194bSRandy Dunlap * @caller: caller's return address 32461da177e4SLinus Torvalds * 3247f38fcb9cSChristoph Hellwig * Allocate enough pages to cover @size from the page level allocator with 3248f38fcb9cSChristoph Hellwig * @gfp_mask flags. Map them into contiguous kernel virtual space. 3249a7c3e901SMichal Hocko * 3250dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 3251a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported 3252a7c3e901SMichal Hocko * 3253a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be consulted 3254a7c3e901SMichal Hocko * with mm people. 
3255a862f68aSMike Rapoport * 3256a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 32571da177e4SLinus Torvalds */ 32582b905948SChristoph Hellwig void *__vmalloc_node(unsigned long size, unsigned long align, 3259f38fcb9cSChristoph Hellwig gfp_t gfp_mask, int node, const void *caller) 32601da177e4SLinus Torvalds { 3261d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 3262f38fcb9cSChristoph Hellwig gfp_mask, PAGE_KERNEL, 0, node, caller); 32631da177e4SLinus Torvalds } 3264c3f896dcSChristoph Hellwig /* 3265c3f896dcSChristoph Hellwig * This is only for performance analysis of vmalloc and stress purpose. 3266c3f896dcSChristoph Hellwig * It is required by vmalloc test module, therefore do not use it other 3267c3f896dcSChristoph Hellwig * than that. 3268c3f896dcSChristoph Hellwig */ 3269c3f896dcSChristoph Hellwig #ifdef CONFIG_TEST_VMALLOC_MODULE 3270c3f896dcSChristoph Hellwig EXPORT_SYMBOL_GPL(__vmalloc_node); 3271c3f896dcSChristoph Hellwig #endif 32721da177e4SLinus Torvalds 327388dca4caSChristoph Hellwig void *__vmalloc(unsigned long size, gfp_t gfp_mask) 3274930fc45aSChristoph Lameter { 3275f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, 327623016969SChristoph Lameter __builtin_return_address(0)); 3277930fc45aSChristoph Lameter } 32781da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 32791da177e4SLinus Torvalds 32801da177e4SLinus Torvalds /** 32811da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 32821da177e4SLinus Torvalds * @size: allocation size 328392eac168SMike Rapoport * 32841da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 32851da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 32861da177e4SLinus Torvalds * 3287c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 32881da177e4SLinus Torvalds * use __vmalloc() instead. 3289a862f68aSMike Rapoport * 3290a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 32911da177e4SLinus Torvalds */ 32921da177e4SLinus Torvalds void *vmalloc(unsigned long size) 32931da177e4SLinus Torvalds { 32944d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE, 32954d39d728SChristoph Hellwig __builtin_return_address(0)); 32961da177e4SLinus Torvalds } 32971da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 32981da177e4SLinus Torvalds 3299930fc45aSChristoph Lameter /** 3300559089e0SSong Liu * vmalloc_huge - allocate virtually contiguous memory, allow huge pages 330115a64f5aSClaudio Imbrenda * @size: allocation size 3302559089e0SSong Liu * @gfp_mask: flags for the page level allocator 330315a64f5aSClaudio Imbrenda * 3304559089e0SSong Liu * Allocate enough pages to cover @size from the page level 330515a64f5aSClaudio Imbrenda * allocator and map them into contiguous kernel virtual space. 
3306559089e0SSong Liu * If @size is greater than or equal to PMD_SIZE, allow using 3307559089e0SSong Liu * huge pages for the memory 330815a64f5aSClaudio Imbrenda * 330915a64f5aSClaudio Imbrenda * Return: pointer to the allocated memory or %NULL on error 331015a64f5aSClaudio Imbrenda */ 3311559089e0SSong Liu void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) 331215a64f5aSClaudio Imbrenda { 331315a64f5aSClaudio Imbrenda return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 3314559089e0SSong Liu gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, 331515a64f5aSClaudio Imbrenda NUMA_NO_NODE, __builtin_return_address(0)); 331615a64f5aSClaudio Imbrenda } 3317559089e0SSong Liu EXPORT_SYMBOL_GPL(vmalloc_huge); 331815a64f5aSClaudio Imbrenda 331915a64f5aSClaudio Imbrenda /** 3320e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 3321e1ca7788SDave Young * @size: allocation size 332292eac168SMike Rapoport * 3323e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 3324e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 3325e1ca7788SDave Young * The memory allocated is set to zero. 3326e1ca7788SDave Young * 3327e1ca7788SDave Young * For tight control over page level allocator and protection flags 3328e1ca7788SDave Young * use __vmalloc() instead. 3329a862f68aSMike Rapoport * 3330a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3331e1ca7788SDave Young */ 3332e1ca7788SDave Young void *vzalloc(unsigned long size) 3333e1ca7788SDave Young { 33344d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 33354d39d728SChristoph Hellwig __builtin_return_address(0)); 3336e1ca7788SDave Young } 3337e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 3338e1ca7788SDave Young 3339e1ca7788SDave Young /** 3340ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 334183342314SNick Piggin * @size: allocation size 3342ead04089SRolf Eike Beer * 3343ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 3344ead04089SRolf Eike Beer * without leaking data. 3345a862f68aSMike Rapoport * 3346a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 334783342314SNick Piggin */ 334883342314SNick Piggin void *vmalloc_user(unsigned long size) 334983342314SNick Piggin { 3350bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3351bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 3352bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 335300ef2d2fSDavid Rientjes __builtin_return_address(0)); 335483342314SNick Piggin } 335583342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 335683342314SNick Piggin 335783342314SNick Piggin /** 3358930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 3359930fc45aSChristoph Lameter * @size: allocation size 3360d44e0780SRandy Dunlap * @node: numa node 3361930fc45aSChristoph Lameter * 3362930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 3363930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 3364930fc45aSChristoph Lameter * 3365c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 3366930fc45aSChristoph Lameter * use __vmalloc() instead. 
3367a862f68aSMike Rapoport * 3368a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3369930fc45aSChristoph Lameter */ 3370930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 3371930fc45aSChristoph Lameter { 3372f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, node, 3373f38fcb9cSChristoph Hellwig __builtin_return_address(0)); 3374930fc45aSChristoph Lameter } 3375930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 3376930fc45aSChristoph Lameter 3377e1ca7788SDave Young /** 3378e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 3379e1ca7788SDave Young * @size: allocation size 3380e1ca7788SDave Young * @node: numa node 3381e1ca7788SDave Young * 3382e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 3383e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 3384e1ca7788SDave Young * The memory allocated is set to zero. 3385e1ca7788SDave Young * 3386a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3387e1ca7788SDave Young */ 3388e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 3389e1ca7788SDave Young { 33904d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node, 33914d39d728SChristoph Hellwig __builtin_return_address(0)); 3392e1ca7788SDave Young } 3393e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 3394e1ca7788SDave Young 33950d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 3396698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 33970d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 3398698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 33990d08e0d3SAndi Kleen #else 3400698d0831SMichal Hocko /* 3401698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others 3402698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone. 3403698d0831SMichal Hocko */ 340468d68ff6SZhiyuan Dai #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 34050d08e0d3SAndi Kleen #endif 34060d08e0d3SAndi Kleen 34071da177e4SLinus Torvalds /** 34081da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 34091da177e4SLinus Torvalds * @size: allocation size 34101da177e4SLinus Torvalds * 34111da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 34121da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 3413a862f68aSMike Rapoport * 3414a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 34151da177e4SLinus Torvalds */ 34161da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 34171da177e4SLinus Torvalds { 3418f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, 3419f38fcb9cSChristoph Hellwig __builtin_return_address(0)); 34201da177e4SLinus Torvalds } 34211da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 34221da177e4SLinus Torvalds 342383342314SNick Piggin /** 3424ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 342583342314SNick Piggin * @size: allocation size 3426ead04089SRolf Eike Beer * 3427ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 3428ead04089SRolf Eike Beer * mapped to userspace without leaking data. 
3429a862f68aSMike Rapoport * 3430a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 343183342314SNick Piggin */ 343283342314SNick Piggin void *vmalloc_32_user(unsigned long size) 343383342314SNick Piggin { 3434bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3435bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 3436bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 34375a82ac71SRoman Penyaev __builtin_return_address(0)); 343883342314SNick Piggin } 343983342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 344083342314SNick Piggin 3441d0107eb0SKAMEZAWA Hiroyuki /* 3442d0107eb0SKAMEZAWA Hiroyuki * small helper routine , copy contents to buf from addr. 3443d0107eb0SKAMEZAWA Hiroyuki * If the page is not present, fill zero. 3444d0107eb0SKAMEZAWA Hiroyuki */ 3445d0107eb0SKAMEZAWA Hiroyuki 3446d0107eb0SKAMEZAWA Hiroyuki static int aligned_vread(char *buf, char *addr, unsigned long count) 3447d0107eb0SKAMEZAWA Hiroyuki { 3448d0107eb0SKAMEZAWA Hiroyuki struct page *p; 3449d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 3450d0107eb0SKAMEZAWA Hiroyuki 3451d0107eb0SKAMEZAWA Hiroyuki while (count) { 3452d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 3453d0107eb0SKAMEZAWA Hiroyuki 3454891c49abSAlexander Kuleshov offset = offset_in_page(addr); 3455d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 3456d0107eb0SKAMEZAWA Hiroyuki if (length > count) 3457d0107eb0SKAMEZAWA Hiroyuki length = count; 3458d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 3459d0107eb0SKAMEZAWA Hiroyuki /* 3460d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need 3461d0107eb0SKAMEZAWA Hiroyuki * lock. But adding lock here means that we need to add 3462f0953a1bSIngo Molnar * overhead of vmalloc()/vfree() calls for this _debug_ 3463d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 3464d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function. 3465d0107eb0SKAMEZAWA Hiroyuki */ 3466d0107eb0SKAMEZAWA Hiroyuki if (p) { 3467f7c8ce44SDavid Hildenbrand /* We can expect USER0 is not used -- see vread() */ 34689b04c5feSCong Wang void *map = kmap_atomic(p); 3469d0107eb0SKAMEZAWA Hiroyuki memcpy(buf, map + offset, length); 34709b04c5feSCong Wang kunmap_atomic(map); 3471d0107eb0SKAMEZAWA Hiroyuki } else 3472d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, length); 3473d0107eb0SKAMEZAWA Hiroyuki 3474d0107eb0SKAMEZAWA Hiroyuki addr += length; 3475d0107eb0SKAMEZAWA Hiroyuki buf += length; 3476d0107eb0SKAMEZAWA Hiroyuki copied += length; 3477d0107eb0SKAMEZAWA Hiroyuki count -= length; 3478d0107eb0SKAMEZAWA Hiroyuki } 3479d0107eb0SKAMEZAWA Hiroyuki return copied; 3480d0107eb0SKAMEZAWA Hiroyuki } 3481d0107eb0SKAMEZAWA Hiroyuki 3482d0107eb0SKAMEZAWA Hiroyuki /** 3483d0107eb0SKAMEZAWA Hiroyuki * vread() - read vmalloc area in a safe way. 3484d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for reading data 3485d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 3486d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read. 3487d0107eb0SKAMEZAWA Hiroyuki * 3488d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 3489d0107eb0SKAMEZAWA Hiroyuki * copy data from that area to a given buffer. If the given memory range 3490d0107eb0SKAMEZAWA Hiroyuki * of [addr...addr+count) includes some valid address, data is copied to 3491d0107eb0SKAMEZAWA Hiroyuki * proper area of @buf. If there are memory holes, they'll be zero-filled. 
3492d0107eb0SKAMEZAWA Hiroyuki * IOREMAP area is treated as memory hole and no copy is done. 3493d0107eb0SKAMEZAWA Hiroyuki * 3494d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with an alive 3495a8e5202dSCong Wang * vm_struct area, it returns 0. @buf should be a kernel buffer. 3496d0107eb0SKAMEZAWA Hiroyuki * 3497d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vread() is never necessary because the caller 3498d0107eb0SKAMEZAWA Hiroyuki * should know vmalloc() area is valid and can use memcpy(). 3499d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access vmalloc area without 3500bbcd53c9SDavid Hildenbrand * any information, such as /proc/kcore. 3501a862f68aSMike Rapoport * 3502a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be increased 3503a862f68aSMike Rapoport * (same number as @count) or %0 if [addr...addr+count) doesn't 3504a862f68aSMike Rapoport * include any intersection with valid vmalloc area 3505d0107eb0SKAMEZAWA Hiroyuki */ 35061da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count) 35071da177e4SLinus Torvalds { 3508e81ce85fSJoonsoo Kim struct vmap_area *va; 3509e81ce85fSJoonsoo Kim struct vm_struct *vm; 35101da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 3511d0107eb0SKAMEZAWA Hiroyuki unsigned long buflen = count; 35121da177e4SLinus Torvalds unsigned long n; 35131da177e4SLinus Torvalds 35144aff1dc4SAndrey Konovalov addr = kasan_reset_tag(addr); 35154aff1dc4SAndrey Konovalov 35161da177e4SLinus Torvalds /* Don't allow overflow */ 35171da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 35181da177e4SLinus Torvalds count = -(unsigned long) addr; 35191da177e4SLinus Torvalds 3520e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock); 3521f181234aSChen Wandun va = find_vmap_area_exceed_addr((unsigned long)addr); 3522f608788cSSerapheim Dimitropoulos if (!va) 3523f608788cSSerapheim Dimitropoulos goto finished; 3524f181234aSChen Wandun 3525f181234aSChen Wandun /* no intersects with alive vmap_area */ 3526f181234aSChen Wandun if ((unsigned long)addr + count <= va->va_start) 3527f181234aSChen Wandun goto finished; 3528f181234aSChen Wandun 3529f608788cSSerapheim Dimitropoulos list_for_each_entry_from(va, &vmap_area_list, list) { 3530e81ce85fSJoonsoo Kim if (!count) 3531e81ce85fSJoonsoo Kim break; 3532e81ce85fSJoonsoo Kim 3533688fcbfcSPengfei Li if (!va->vm) 3534e81ce85fSJoonsoo Kim continue; 3535e81ce85fSJoonsoo Kim 3536e81ce85fSJoonsoo Kim vm = va->vm; 3537e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr; 3538762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm)) 35391da177e4SLinus Torvalds continue; 35401da177e4SLinus Torvalds while (addr < vaddr) { 35411da177e4SLinus Torvalds if (count == 0) 35421da177e4SLinus Torvalds goto finished; 35431da177e4SLinus Torvalds *buf = '\0'; 35441da177e4SLinus Torvalds buf++; 35451da177e4SLinus Torvalds addr++; 35461da177e4SLinus Torvalds count--; 35471da177e4SLinus Torvalds } 3548762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr; 3549d0107eb0SKAMEZAWA Hiroyuki if (n > count) 3550d0107eb0SKAMEZAWA Hiroyuki n = count; 3551e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) 3552d0107eb0SKAMEZAWA Hiroyuki aligned_vread(buf, addr, n); 3553d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */ 3554d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, n); 3555d0107eb0SKAMEZAWA Hiroyuki buf += n; 3556d0107eb0SKAMEZAWA Hiroyuki addr += n; 3557d0107eb0SKAMEZAWA Hiroyuki count -= n; 35581da177e4SLinus Torvalds } 35591da177e4SLinus
Torvalds finished: 3560e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 3561d0107eb0SKAMEZAWA Hiroyuki 3562d0107eb0SKAMEZAWA Hiroyuki if (buf == buf_start) 3563d0107eb0SKAMEZAWA Hiroyuki return 0; 3564d0107eb0SKAMEZAWA Hiroyuki /* zero-fill memory holes */ 3565d0107eb0SKAMEZAWA Hiroyuki if (buf != buf_start + buflen) 3566d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, buflen - (buf - buf_start)); 3567d0107eb0SKAMEZAWA Hiroyuki 3568d0107eb0SKAMEZAWA Hiroyuki return buflen; 35691da177e4SLinus Torvalds } 35701da177e4SLinus Torvalds 3571d0107eb0SKAMEZAWA Hiroyuki /** 3572e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace 3573e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover 3574e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at 3575e69e9d4aSHATAYAMA Daisuke * @kaddr: virtual address of vmalloc kernel memory 3576bdebd6a2SJann Horn * @pgoff: offset from @kaddr to start at 3577e69e9d4aSHATAYAMA Daisuke * @size: size of map area 3578e69e9d4aSHATAYAMA Daisuke * 3579e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure 3580e69e9d4aSHATAYAMA Daisuke * 3581e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area, 3582e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at 3583e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if that criteria isn't 3584e69e9d4aSHATAYAMA Daisuke * met. 3585e69e9d4aSHATAYAMA Daisuke * 3586e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c) 3587e69e9d4aSHATAYAMA Daisuke */ 3588e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 3589bdebd6a2SJann Horn void *kaddr, unsigned long pgoff, 3590bdebd6a2SJann Horn unsigned long size) 3591e69e9d4aSHATAYAMA Daisuke { 3592e69e9d4aSHATAYAMA Daisuke struct vm_struct *area; 3593bdebd6a2SJann Horn unsigned long off; 3594bdebd6a2SJann Horn unsigned long end_index; 3595bdebd6a2SJann Horn 3596bdebd6a2SJann Horn if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) 3597bdebd6a2SJann Horn return -EINVAL; 3598e69e9d4aSHATAYAMA Daisuke 3599e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size); 3600e69e9d4aSHATAYAMA Daisuke 3601e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 3602e69e9d4aSHATAYAMA Daisuke return -EINVAL; 3603e69e9d4aSHATAYAMA Daisuke 3604e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr); 3605e69e9d4aSHATAYAMA Daisuke if (!area) 3606e69e9d4aSHATAYAMA Daisuke return -EINVAL; 3607e69e9d4aSHATAYAMA Daisuke 3608fe9041c2SChristoph Hellwig if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) 3609e69e9d4aSHATAYAMA Daisuke return -EINVAL; 3610e69e9d4aSHATAYAMA Daisuke 3611bdebd6a2SJann Horn if (check_add_overflow(size, off, &end_index) || 3612bdebd6a2SJann Horn end_index > get_vm_area_size(area)) 3613e69e9d4aSHATAYAMA Daisuke return -EINVAL; 3614bdebd6a2SJann Horn kaddr += off; 3615e69e9d4aSHATAYAMA Daisuke 3616e69e9d4aSHATAYAMA Daisuke do { 3617e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr); 3618e69e9d4aSHATAYAMA Daisuke int ret; 3619e69e9d4aSHATAYAMA Daisuke 3620e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page); 3621e69e9d4aSHATAYAMA Daisuke if (ret) 3622e69e9d4aSHATAYAMA Daisuke return ret; 3623e69e9d4aSHATAYAMA Daisuke 3624e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE; 3625e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE; 3626e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE; 3627e69e9d4aSHATAYAMA Daisuke } while (size > 0); 3628e69e9d4aSHATAYAMA Daisuke 3629e69e9d4aSHATAYAMA 
Daisuke vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3630e69e9d4aSHATAYAMA Daisuke 3631e69e9d4aSHATAYAMA Daisuke return 0; 3632e69e9d4aSHATAYAMA Daisuke } 3633e69e9d4aSHATAYAMA Daisuke 3634e69e9d4aSHATAYAMA Daisuke /** 363583342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 363683342314SNick Piggin * @vma: vma to cover (map full range of vma) 363783342314SNick Piggin * @addr: vmalloc memory 363883342314SNick Piggin * @pgoff: number of pages into addr before first page to map 36397682486bSRandy Dunlap * 36407682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 364183342314SNick Piggin * 364283342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 364383342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 364483342314SNick Piggin * that criteria isn't met. 364583342314SNick Piggin * 364672fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 364783342314SNick Piggin */ 364883342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 364983342314SNick Piggin unsigned long pgoff) 365083342314SNick Piggin { 3651e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start, 3652bdebd6a2SJann Horn addr, pgoff, 3653e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start); 365483342314SNick Piggin } 365583342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 365683342314SNick Piggin 36575f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 36585f4352fbSJeremy Fitzhardinge { 36595f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 36605f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 36615f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 36625f4352fbSJeremy Fitzhardinge kfree(area); 36635f4352fbSJeremy Fitzhardinge } 36645f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 3665a10aa579SChristoph Lameter 36664f8b02b4STejun Heo #ifdef CONFIG_SMP 3667ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 3668ca23e405STejun Heo { 36694583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node); 3670ca23e405STejun Heo } 3671ca23e405STejun Heo 3672ca23e405STejun Heo /** 367368ad4a33SUladzislau Rezki (Sony) * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 367468ad4a33SUladzislau Rezki (Sony) * @addr: target address 3675ca23e405STejun Heo * 367668ad4a33SUladzislau Rezki (Sony) * Returns: vmap_area if it is found. If there is no such area 367768ad4a33SUladzislau Rezki (Sony) * the first highest(reverse order) vmap_area is returned 367868ad4a33SUladzislau Rezki (Sony) * i.e. va->va_start < addr && va->va_end < addr or NULL 367968ad4a33SUladzislau Rezki (Sony) * if there are no any areas before @addr. 
3680ca23e405STejun Heo */ 368168ad4a33SUladzislau Rezki (Sony) static struct vmap_area * 368268ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr) 3683ca23e405STejun Heo { 368468ad4a33SUladzislau Rezki (Sony) struct vmap_area *va, *tmp; 368568ad4a33SUladzislau Rezki (Sony) struct rb_node *n; 368668ad4a33SUladzislau Rezki (Sony) 368768ad4a33SUladzislau Rezki (Sony) n = free_vmap_area_root.rb_node; 368868ad4a33SUladzislau Rezki (Sony) va = NULL; 3689ca23e405STejun Heo 3690ca23e405STejun Heo while (n) { 369168ad4a33SUladzislau Rezki (Sony) tmp = rb_entry(n, struct vmap_area, rb_node); 369268ad4a33SUladzislau Rezki (Sony) if (tmp->va_start <= addr) { 369368ad4a33SUladzislau Rezki (Sony) va = tmp; 369468ad4a33SUladzislau Rezki (Sony) if (tmp->va_end >= addr) 3695ca23e405STejun Heo break; 3696ca23e405STejun Heo 369768ad4a33SUladzislau Rezki (Sony) n = n->rb_right; 3698ca23e405STejun Heo } else { 369968ad4a33SUladzislau Rezki (Sony) n = n->rb_left; 3700ca23e405STejun Heo } 370168ad4a33SUladzislau Rezki (Sony) } 370268ad4a33SUladzislau Rezki (Sony) 370368ad4a33SUladzislau Rezki (Sony) return va; 3704ca23e405STejun Heo } 3705ca23e405STejun Heo 3706ca23e405STejun Heo /** 370768ad4a33SUladzislau Rezki (Sony) * pvm_determine_end_from_reverse - find the highest aligned address 370868ad4a33SUladzislau Rezki (Sony) * of free block below VMALLOC_END 370968ad4a33SUladzislau Rezki (Sony) * @va: 371068ad4a33SUladzislau Rezki (Sony) * in - the VA we start the search(reverse order); 371168ad4a33SUladzislau Rezki (Sony) * out - the VA with the highest aligned end address. 3712799fa85dSAlex Shi * @align: alignment for required highest address 3713ca23e405STejun Heo * 371468ad4a33SUladzislau Rezki (Sony) * Returns: determined end address within vmap_area 3715ca23e405STejun Heo */ 371668ad4a33SUladzislau Rezki (Sony) static unsigned long 371768ad4a33SUladzislau Rezki (Sony) pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 3718ca23e405STejun Heo { 371968ad4a33SUladzislau Rezki (Sony) unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3720ca23e405STejun Heo unsigned long addr; 3721ca23e405STejun Heo 372268ad4a33SUladzislau Rezki (Sony) if (likely(*va)) { 372368ad4a33SUladzislau Rezki (Sony) list_for_each_entry_from_reverse((*va), 372468ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list, list) { 372568ad4a33SUladzislau Rezki (Sony) addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 372668ad4a33SUladzislau Rezki (Sony) if ((*va)->va_start < addr) 372768ad4a33SUladzislau Rezki (Sony) return addr; 372868ad4a33SUladzislau Rezki (Sony) } 3729ca23e405STejun Heo } 3730ca23e405STejun Heo 373168ad4a33SUladzislau Rezki (Sony) return 0; 3732ca23e405STejun Heo } 3733ca23e405STejun Heo 3734ca23e405STejun Heo /** 3735ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 3736ca23e405STejun Heo * @offsets: array containing offset of each area 3737ca23e405STejun Heo * @sizes: array containing size of each area 3738ca23e405STejun Heo * @nr_vms: the number of areas to allocate 3739ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this 3740ca23e405STejun Heo * 3741ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated 3742ca23e405STejun Heo * vm_structs on success, %NULL on failure 3743ca23e405STejun Heo * 3744ca23e405STejun Heo * Percpu allocator wants to use congruent vm areas so that it can 3745ca23e405STejun Heo * maintain the offsets among percpu areas. 
This function allocates 3746ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 3747ec3f64fcSDavid Rientjes * be scattered pretty far, distance between two areas easily going up 3748ec3f64fcSDavid Rientjes * to gigabytes. To avoid interacting with regular vmallocs, these 3749ec3f64fcSDavid Rientjes * areas are allocated from top. 3750ca23e405STejun Heo * 3751ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It 375268ad4a33SUladzislau Rezki (Sony) * does everything top-down and scans free blocks from the end looking 375368ad4a33SUladzislau Rezki (Sony) * for matching base. While scanning, if any of the areas do not fit the 375468ad4a33SUladzislau Rezki (Sony) * base address is pulled down to fit the area. Scanning is repeated till 375568ad4a33SUladzislau Rezki (Sony) * all the areas fit and then all necessary data structures are inserted 375668ad4a33SUladzislau Rezki (Sony) * and the result is returned. 3757ca23e405STejun Heo */ 3758ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 3759ca23e405STejun Heo const size_t *sizes, int nr_vms, 3760ec3f64fcSDavid Rientjes size_t align) 3761ca23e405STejun Heo { 3762ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 3763ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 376468ad4a33SUladzislau Rezki (Sony) struct vmap_area **vas, *va; 3765ca23e405STejun Heo struct vm_struct **vms; 3766ca23e405STejun Heo int area, area2, last_area, term_area; 3767253a496dSDaniel Axtens unsigned long base, start, size, end, last_end, orig_start, orig_end; 3768ca23e405STejun Heo bool purged = false; 3769ca23e405STejun Heo 3770ca23e405STejun Heo /* verify parameters and allocate data structures */ 3771891c49abSAlexander Kuleshov BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 3772ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 3773ca23e405STejun Heo start = offsets[area]; 3774ca23e405STejun Heo end = start + sizes[area]; 3775ca23e405STejun Heo 3776ca23e405STejun Heo /* is everything aligned properly? 
*/ 3777ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 3778ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 3779ca23e405STejun Heo 3780ca23e405STejun Heo /* detect the area with the highest address */ 3781ca23e405STejun Heo if (start > offsets[last_area]) 3782ca23e405STejun Heo last_area = area; 3783ca23e405STejun Heo 3784c568da28SWei Yang for (area2 = area + 1; area2 < nr_vms; area2++) { 3785ca23e405STejun Heo unsigned long start2 = offsets[area2]; 3786ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 3787ca23e405STejun Heo 3788c568da28SWei Yang BUG_ON(start2 < end && start < end2); 3789ca23e405STejun Heo } 3790ca23e405STejun Heo } 3791ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 3792ca23e405STejun Heo 3793ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 3794ca23e405STejun Heo WARN_ON(true); 3795ca23e405STejun Heo return NULL; 3796ca23e405STejun Heo } 3797ca23e405STejun Heo 37984d67d860SThomas Meyer vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 37994d67d860SThomas Meyer vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 3800ca23e405STejun Heo if (!vas || !vms) 3801f1db7afdSKautuk Consul goto err_free2; 3802ca23e405STejun Heo 3803ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 380468ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 3805ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 3806ca23e405STejun Heo if (!vas[area] || !vms[area]) 3807ca23e405STejun Heo goto err_free; 3808ca23e405STejun Heo } 3809ca23e405STejun Heo retry: 3810e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 3811ca23e405STejun Heo 3812ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 3813ca23e405STejun Heo area = term_area = last_area; 3814ca23e405STejun Heo start = offsets[area]; 3815ca23e405STejun Heo end = start + sizes[area]; 3816ca23e405STejun Heo 381768ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(vmalloc_end); 381868ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 3819ca23e405STejun Heo 3820ca23e405STejun Heo while (true) { 3821ca23e405STejun Heo /* 3822ca23e405STejun Heo * base might have underflowed, add last_end before 3823ca23e405STejun Heo * comparing. 3824ca23e405STejun Heo */ 382568ad4a33SUladzislau Rezki (Sony) if (base + last_end < vmalloc_start + last_end) 382668ad4a33SUladzislau Rezki (Sony) goto overflow; 3827ca23e405STejun Heo 3828ca23e405STejun Heo /* 382968ad4a33SUladzislau Rezki (Sony) * Fitting base has not been found. 3830ca23e405STejun Heo */ 383168ad4a33SUladzislau Rezki (Sony) if (va == NULL) 383268ad4a33SUladzislau Rezki (Sony) goto overflow; 3833ca23e405STejun Heo 3834ca23e405STejun Heo /* 3835d8cc323dSQiujun Huang * If required width exceeds current VA block, move 38365336e52cSKuppuswamy Sathyanarayanan * base downwards and then recheck. 38375336e52cSKuppuswamy Sathyanarayanan */ 38385336e52cSKuppuswamy Sathyanarayanan if (base + end > va->va_end) { 38395336e52cSKuppuswamy Sathyanarayanan base = pvm_determine_end_from_reverse(&va, align) - end; 38405336e52cSKuppuswamy Sathyanarayanan term_area = area; 38415336e52cSKuppuswamy Sathyanarayanan continue; 38425336e52cSKuppuswamy Sathyanarayanan } 38435336e52cSKuppuswamy Sathyanarayanan 38445336e52cSKuppuswamy Sathyanarayanan /* 384568ad4a33SUladzislau Rezki (Sony) * If this VA does not fit, move base downwards and recheck. 
3846ca23e405STejun Heo */ 38475336e52cSKuppuswamy Sathyanarayanan if (base + start < va->va_start) { 384868ad4a33SUladzislau Rezki (Sony) va = node_to_va(rb_prev(&va->rb_node)); 384968ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 3850ca23e405STejun Heo term_area = area; 3851ca23e405STejun Heo continue; 3852ca23e405STejun Heo } 3853ca23e405STejun Heo 3854ca23e405STejun Heo /* 3855ca23e405STejun Heo * This area fits, move on to the previous one. If 3856ca23e405STejun Heo * the previous one is the terminal one, we're done. 3857ca23e405STejun Heo */ 3858ca23e405STejun Heo area = (area + nr_vms - 1) % nr_vms; 3859ca23e405STejun Heo if (area == term_area) 3860ca23e405STejun Heo break; 386168ad4a33SUladzislau Rezki (Sony) 3862ca23e405STejun Heo start = offsets[area]; 3863ca23e405STejun Heo end = start + sizes[area]; 386468ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(base + end); 3865ca23e405STejun Heo } 386668ad4a33SUladzislau Rezki (Sony) 3867ca23e405STejun Heo /* we've found a fitting base, insert all va's */ 3868ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 386968ad4a33SUladzislau Rezki (Sony) int ret; 3870ca23e405STejun Heo 387168ad4a33SUladzislau Rezki (Sony) start = base + offsets[area]; 387268ad4a33SUladzislau Rezki (Sony) size = sizes[area]; 387368ad4a33SUladzislau Rezki (Sony) 387468ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(start); 387568ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(va == NULL)) 387668ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 387768ad4a33SUladzislau Rezki (Sony) goto recovery; 387868ad4a33SUladzislau Rezki (Sony) 3879f9863be4SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(&free_vmap_area_root, 3880f9863be4SUladzislau Rezki (Sony) &free_vmap_area_list, 3881f9863be4SUladzislau Rezki (Sony) va, start, size); 38821b23ff80SBaoquan He if (WARN_ON_ONCE(unlikely(ret))) 388368ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 388468ad4a33SUladzislau Rezki (Sony) goto recovery; 388568ad4a33SUladzislau Rezki (Sony) 388668ad4a33SUladzislau Rezki (Sony) /* Allocated area. */ 388768ad4a33SUladzislau Rezki (Sony) va = vas[area]; 388868ad4a33SUladzislau Rezki (Sony) va->va_start = start; 388968ad4a33SUladzislau Rezki (Sony) va->va_end = start + size; 3890ca23e405STejun Heo } 3891ca23e405STejun Heo 3892e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 3893ca23e405STejun Heo 3894253a496dSDaniel Axtens /* populate the kasan shadow space */ 3895253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) { 3896253a496dSDaniel Axtens if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) 3897253a496dSDaniel Axtens goto err_free_shadow; 3898253a496dSDaniel Axtens } 3899253a496dSDaniel Axtens 3900ca23e405STejun Heo /* insert all vm's */ 3901e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 3902e36176beSUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 3903e36176beSUladzislau Rezki (Sony) insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); 3904e36176beSUladzislau Rezki (Sony) 3905e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, 3906ca23e405STejun Heo pcpu_get_vm_areas); 3907e36176beSUladzislau Rezki (Sony) } 3908e36176beSUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 3909ca23e405STejun Heo 391019f1c3acSAndrey Konovalov /* 391119f1c3acSAndrey Konovalov * Mark allocated areas as accessible. 
Do it now as a best-effort 391219f1c3acSAndrey Konovalov * approach, as they can be mapped outside of vmalloc code. 391323689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for 391423689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 391519f1c3acSAndrey Konovalov */ 39161d96320fSAndrey Konovalov for (area = 0; area < nr_vms; area++) 39171d96320fSAndrey Konovalov vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, 3918f6e39794SAndrey Konovalov vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); 39191d96320fSAndrey Konovalov 3920ca23e405STejun Heo kfree(vas); 3921ca23e405STejun Heo return vms; 3922ca23e405STejun Heo 392368ad4a33SUladzislau Rezki (Sony) recovery: 3924e36176beSUladzislau Rezki (Sony) /* 3925e36176beSUladzislau Rezki (Sony) * Remove previously allocated areas. There is no 3926e36176beSUladzislau Rezki (Sony) * need in removing these areas from the busy tree, 3927e36176beSUladzislau Rezki (Sony) * because they are inserted only on the final step 3928e36176beSUladzislau Rezki (Sony) * and when pcpu_get_vm_areas() is success. 3929e36176beSUladzislau Rezki (Sony) */ 393068ad4a33SUladzislau Rezki (Sony) while (area--) { 3931253a496dSDaniel Axtens orig_start = vas[area]->va_start; 3932253a496dSDaniel Axtens orig_end = vas[area]->va_end; 393396e2db45SUladzislau Rezki (Sony) va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 39343c5c3cfbSDaniel Axtens &free_vmap_area_list); 39359c801f61SUladzislau Rezki (Sony) if (va) 3936253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 3937253a496dSDaniel Axtens va->va_start, va->va_end); 393868ad4a33SUladzislau Rezki (Sony) vas[area] = NULL; 393968ad4a33SUladzislau Rezki (Sony) } 394068ad4a33SUladzislau Rezki (Sony) 394168ad4a33SUladzislau Rezki (Sony) overflow: 3942e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 394368ad4a33SUladzislau Rezki (Sony) if (!purged) { 394468ad4a33SUladzislau Rezki (Sony) purge_vmap_area_lazy(); 394568ad4a33SUladzislau Rezki (Sony) purged = true; 394668ad4a33SUladzislau Rezki (Sony) 394768ad4a33SUladzislau Rezki (Sony) /* Before "retry", check if we recover. 
*/ 394868ad4a33SUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 394968ad4a33SUladzislau Rezki (Sony) if (vas[area]) 395068ad4a33SUladzislau Rezki (Sony) continue; 395168ad4a33SUladzislau Rezki (Sony) 395268ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc( 395368ad4a33SUladzislau Rezki (Sony) vmap_area_cachep, GFP_KERNEL); 395468ad4a33SUladzislau Rezki (Sony) if (!vas[area]) 395568ad4a33SUladzislau Rezki (Sony) goto err_free; 395668ad4a33SUladzislau Rezki (Sony) } 395768ad4a33SUladzislau Rezki (Sony) 395868ad4a33SUladzislau Rezki (Sony) goto retry; 395968ad4a33SUladzislau Rezki (Sony) } 396068ad4a33SUladzislau Rezki (Sony) 3961ca23e405STejun Heo err_free: 3962ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 396368ad4a33SUladzislau Rezki (Sony) if (vas[area]) 396468ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, vas[area]); 396568ad4a33SUladzislau Rezki (Sony) 3966ca23e405STejun Heo kfree(vms[area]); 3967ca23e405STejun Heo } 3968f1db7afdSKautuk Consul err_free2: 3969ca23e405STejun Heo kfree(vas); 3970ca23e405STejun Heo kfree(vms); 3971ca23e405STejun Heo return NULL; 3972253a496dSDaniel Axtens 3973253a496dSDaniel Axtens err_free_shadow: 3974253a496dSDaniel Axtens spin_lock(&free_vmap_area_lock); 3975253a496dSDaniel Axtens /* 3976253a496dSDaniel Axtens * We release all the vmalloc shadows, even the ones for regions that 3977253a496dSDaniel Axtens * hadn't been successfully added. This relies on kasan_release_vmalloc 3978253a496dSDaniel Axtens * being able to tolerate this case. 3979253a496dSDaniel Axtens */ 3980253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) { 3981253a496dSDaniel Axtens orig_start = vas[area]->va_start; 3982253a496dSDaniel Axtens orig_end = vas[area]->va_end; 398396e2db45SUladzislau Rezki (Sony) va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 3984253a496dSDaniel Axtens &free_vmap_area_list); 39859c801f61SUladzislau Rezki (Sony) if (va) 3986253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 3987253a496dSDaniel Axtens va->va_start, va->va_end); 3988253a496dSDaniel Axtens vas[area] = NULL; 3989253a496dSDaniel Axtens kfree(vms[area]); 3990253a496dSDaniel Axtens } 3991253a496dSDaniel Axtens spin_unlock(&free_vmap_area_lock); 3992253a496dSDaniel Axtens kfree(vas); 3993253a496dSDaniel Axtens kfree(vms); 3994253a496dSDaniel Axtens return NULL; 3995ca23e405STejun Heo } 3996ca23e405STejun Heo 3997ca23e405STejun Heo /** 3998ca23e405STejun Heo * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 3999ca23e405STejun Heo * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 4000ca23e405STejun Heo * @nr_vms: the number of allocated areas 4001ca23e405STejun Heo * 4002ca23e405STejun Heo * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 4003ca23e405STejun Heo */ 4004ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 4005ca23e405STejun Heo { 4006ca23e405STejun Heo int i; 4007ca23e405STejun Heo 4008ca23e405STejun Heo for (i = 0; i < nr_vms; i++) 4009ca23e405STejun Heo free_vm_area(vms[i]); 4010ca23e405STejun Heo kfree(vms); 4011ca23e405STejun Heo } 40124f8b02b4STejun Heo #endif /* CONFIG_SMP */ 4013a10aa579SChristoph Lameter 40145bb1bb35SPaul E. McKenney #ifdef CONFIG_PRINTK 401598f18083SPaul E. McKenney bool vmalloc_dump_obj(void *object) 401698f18083SPaul E. McKenney { 401798f18083SPaul E. McKenney struct vm_struct *vm; 401898f18083SPaul E. McKenney void *objp = (void *)PAGE_ALIGN((unsigned long)object); 401998f18083SPaul E. 
McKenney 402098f18083SPaul E. McKenney vm = find_vm_area(objp); 402198f18083SPaul E. McKenney if (!vm) 402298f18083SPaul E. McKenney return false; 4023bd34dcd4SPaul E. McKenney pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", 4024bd34dcd4SPaul E. McKenney vm->nr_pages, (unsigned long)vm->addr, vm->caller); 402598f18083SPaul E. McKenney return true; 402698f18083SPaul E. McKenney } 40275bb1bb35SPaul E. McKenney #endif 402898f18083SPaul E. McKenney 4029a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS 4030a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos) 4031e36176beSUladzislau Rezki (Sony) __acquires(&vmap_purge_lock) 4032d4033afdSJoonsoo Kim __acquires(&vmap_area_lock) 4033a10aa579SChristoph Lameter { 4034e36176beSUladzislau Rezki (Sony) mutex_lock(&vmap_purge_lock); 4035d4033afdSJoonsoo Kim spin_lock(&vmap_area_lock); 4036e36176beSUladzislau Rezki (Sony) 40373f500069Szijun_hu return seq_list_start(&vmap_area_list, *pos); 4038a10aa579SChristoph Lameter } 4039a10aa579SChristoph Lameter 4040a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos) 4041a10aa579SChristoph Lameter { 40423f500069Szijun_hu return seq_list_next(p, &vmap_area_list, pos); 4043a10aa579SChristoph Lameter } 4044a10aa579SChristoph Lameter 4045a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p) 4046d4033afdSJoonsoo Kim __releases(&vmap_area_lock) 40470a7dd4e9SWaiman Long __releases(&vmap_purge_lock) 4048a10aa579SChristoph Lameter { 4049d4033afdSJoonsoo Kim spin_unlock(&vmap_area_lock); 40500a7dd4e9SWaiman Long mutex_unlock(&vmap_purge_lock); 4051a10aa579SChristoph Lameter } 4052a10aa579SChristoph Lameter 4053a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v) 4054a47a126aSEric Dumazet { 4055e5adfffcSKirill A. 
Shutemov if (IS_ENABLED(CONFIG_NUMA)) { 4056a47a126aSEric Dumazet unsigned int nr, *counters = m->private; 405751e50b3aSEric Dumazet unsigned int step = 1U << vm_area_page_order(v); 4058a47a126aSEric Dumazet 4059a47a126aSEric Dumazet if (!counters) 4060a47a126aSEric Dumazet return; 4061a47a126aSEric Dumazet 4062af12346cSWanpeng Li if (v->flags & VM_UNINITIALIZED) 4063af12346cSWanpeng Li return; 40647e5b528bSDmitry Vyukov /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 40657e5b528bSDmitry Vyukov smp_rmb(); 4066af12346cSWanpeng Li 4067a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 4068a47a126aSEric Dumazet 406951e50b3aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr += step) 407051e50b3aSEric Dumazet counters[page_to_nid(v->pages[nr])] += step; 4071a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY) 4072a47a126aSEric Dumazet if (counters[nr]) 4073a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]); 4074a47a126aSEric Dumazet } 4075a47a126aSEric Dumazet } 4076a47a126aSEric Dumazet 4077dd3b8353SUladzislau Rezki (Sony) static void show_purge_info(struct seq_file *m) 4078dd3b8353SUladzislau Rezki (Sony) { 4079dd3b8353SUladzislau Rezki (Sony) struct vmap_area *va; 4080dd3b8353SUladzislau Rezki (Sony) 408196e2db45SUladzislau Rezki (Sony) spin_lock(&purge_vmap_area_lock); 408296e2db45SUladzislau Rezki (Sony) list_for_each_entry(va, &purge_vmap_area_list, list) { 4083dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", 4084dd3b8353SUladzislau Rezki (Sony) (void *)va->va_start, (void *)va->va_end, 4085dd3b8353SUladzislau Rezki (Sony) va->va_end - va->va_start); 4086dd3b8353SUladzislau Rezki (Sony) } 408796e2db45SUladzislau Rezki (Sony) spin_unlock(&purge_vmap_area_lock); 4088dd3b8353SUladzislau Rezki (Sony) } 4089dd3b8353SUladzislau Rezki (Sony) 4090a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p) 4091a10aa579SChristoph Lameter { 40923f500069Szijun_hu struct vmap_area *va; 4093d4033afdSJoonsoo Kim struct vm_struct *v; 4094d4033afdSJoonsoo Kim 40953f500069Szijun_hu va = list_entry(p, struct vmap_area, list); 40963f500069Szijun_hu 4097c2ce8c14SWanpeng Li /* 4098688fcbfcSPengfei Li * s_show can encounter a race with remove_vm_area(): !vm means the 4099688fcbfcSPengfei Li * vmap area is being torn down or is a vm_map_ram allocation.
4100c2ce8c14SWanpeng Li */ 4101688fcbfcSPengfei Li if (!va->vm) { 4102dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 410378c72746SYisheng Xie (void *)va->va_start, (void *)va->va_end, 4104dd3b8353SUladzislau Rezki (Sony) va->va_end - va->va_start); 410578c72746SYisheng Xie 41067cc7913eSEric Dumazet goto final; 410778c72746SYisheng Xie } 4108d4033afdSJoonsoo Kim 4109d4033afdSJoonsoo Kim v = va->vm; 4110a10aa579SChristoph Lameter 411145ec1690SKees Cook seq_printf(m, "0x%pK-0x%pK %7ld", 4112a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 4113a10aa579SChristoph Lameter 411462c70bceSJoe Perches if (v->caller) 411562c70bceSJoe Perches seq_printf(m, " %pS", v->caller); 411623016969SChristoph Lameter 4117a10aa579SChristoph Lameter if (v->nr_pages) 4118a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 4119a10aa579SChristoph Lameter 4120a10aa579SChristoph Lameter if (v->phys_addr) 4121199eaa05SMiles Chen seq_printf(m, " phys=%pa", &v->phys_addr); 4122a10aa579SChristoph Lameter 4123a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 4124f4527c90SFabian Frederick seq_puts(m, " ioremap"); 4125a10aa579SChristoph Lameter 4126a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 4127f4527c90SFabian Frederick seq_puts(m, " vmalloc"); 4128a10aa579SChristoph Lameter 4129a10aa579SChristoph Lameter if (v->flags & VM_MAP) 4130f4527c90SFabian Frederick seq_puts(m, " vmap"); 4131a10aa579SChristoph Lameter 4132a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 4133f4527c90SFabian Frederick seq_puts(m, " user"); 4134a10aa579SChristoph Lameter 4135fe9041c2SChristoph Hellwig if (v->flags & VM_DMA_COHERENT) 4136fe9041c2SChristoph Hellwig seq_puts(m, " dma-coherent"); 4137fe9041c2SChristoph Hellwig 4138244d63eeSDavid Rientjes if (is_vmalloc_addr(v->pages)) 4139f4527c90SFabian Frederick seq_puts(m, " vpages"); 4140a10aa579SChristoph Lameter 4141a47a126aSEric Dumazet show_numa_info(m, v); 4142a10aa579SChristoph Lameter seq_putc(m, '\n'); 4143dd3b8353SUladzislau Rezki (Sony) 4144dd3b8353SUladzislau Rezki (Sony) /* 414596e2db45SUladzislau Rezki (Sony) * As a final step, dump "unpurged" areas. 4146dd3b8353SUladzislau Rezki (Sony) */ 41477cc7913eSEric Dumazet final: 4148dd3b8353SUladzislau Rezki (Sony) if (list_is_last(&va->list, &vmap_area_list)) 4149dd3b8353SUladzislau Rezki (Sony) show_purge_info(m); 4150dd3b8353SUladzislau Rezki (Sony) 4151a10aa579SChristoph Lameter return 0; 4152a10aa579SChristoph Lameter } 4153a10aa579SChristoph Lameter 41545f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = { 4155a10aa579SChristoph Lameter .start = s_start, 4156a10aa579SChristoph Lameter .next = s_next, 4157a10aa579SChristoph Lameter .stop = s_stop, 4158a10aa579SChristoph Lameter .show = s_show, 4159a10aa579SChristoph Lameter }; 41605f6a6a9cSAlexey Dobriyan 41615f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 41625f6a6a9cSAlexey Dobriyan { 4163fddda2b7SChristoph Hellwig if (IS_ENABLED(CONFIG_NUMA)) 41640825a6f9SJoe Perches proc_create_seq_private("vmallocinfo", 0400, NULL, 416544414d82SChristoph Hellwig &vmalloc_op, 416644414d82SChristoph Hellwig nr_node_ids * sizeof(unsigned int), NULL); 4167fddda2b7SChristoph Hellwig else 41680825a6f9SJoe Perches proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op); 41695f6a6a9cSAlexey Dobriyan return 0; 41705f6a6a9cSAlexey Dobriyan } 41715f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 4172db3808c1SJoonsoo Kim 4173a10aa579SChristoph Lameter #endif 4174
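A few usage sketches follow. They are illustrative only: the identifiers (big_table_*, big_cache_alloc, mydev_*, snapshot_vmalloc_range, example_module_alloc) are invented, and the snippets are minimal sketches rather than in-tree callers. First, the common vzalloc()/vfree() pattern described in the vzalloc() kerneldoc above: a large, zeroed, virtually contiguous table allocated from sleeping (GFP_KERNEL) context.

#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct big_table {
	unsigned long nr_entries;
	u64 entries[];				/* flexible array, zeroed by vzalloc() */
};

/* May sleep: GFP_KERNEL reclaim is implied, never call from atomic context. */
static struct big_table *big_table_alloc(unsigned long nr)
{
	struct big_table *t = vzalloc(struct_size(t, entries, nr));

	if (!t)
		return NULL;
	t->nr_entries = nr;
	return t;
}

static void big_table_free(struct big_table *t)
{
	vfree(t);				/* vfree(NULL) is a no-op */
}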
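Large, long-lived allocations can opt in to huge-page backing through vmalloc_huge(), as its kerneldoc above describes. A sketch, assuming the caller tolerates the internal size rounding and the transparent fallback to base pages when huge vmalloc is disabled or unsupported.

#include <linux/gfp.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical helper: back a big, long-lived cache with huge pages where
 * CONFIG_HAVE_ARCH_HUGE_VMALLOC is enabled and "nohugevmalloc" was not set;
 * otherwise this degrades to an ordinary GFP_KERNEL vmalloc().
 */
static void *big_cache_alloc(size_t bytes)
{
	return vmalloc_huge(bytes, GFP_KERNEL);
}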
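vmalloc_user() and remap_vmalloc_range() are designed to pair up when a zeroed kernel buffer is exposed to userspace. A hedged sketch of a character-device mmap path; the mydev_* names, the fixed 1 MiB size, and the missing locking and size checks are simplifications.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#define MYDEV_BUF_SIZE	(1UL << 20)	/* 1 MiB, arbitrary for the sketch */

static void *mydev_buf;			/* VM_USERMAP is set by vmalloc_user() */

static int mydev_open(struct inode *inode, struct file *file)
{
	mydev_buf = vmalloc_user(MYDEV_BUF_SIZE);
	return mydev_buf ? 0 : -ENOMEM;
}

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Maps the full vma; vma->vm_pgoff selects the starting page. */
	return remap_vmalloc_range(vma, mydev_buf, vma->vm_pgoff);
}

static int mydev_release(struct inode *inode, struct file *file)
{
	vfree(mydev_buf);
	mydev_buf = NULL;
	return 0;
}

static const struct file_operations mydev_fops = {
	.owner		= THIS_MODULE,
	.open		= mydev_open,
	.mmap		= mydev_mmap,
	.release	= mydev_release,
};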
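vread() exists for /proc/kcore-style readers that must tolerate holes in vmalloc space. A sketch of such a reader, with snapshot_vmalloc_range() being a made-up helper: mapped bytes are copied, unmapped holes and VM_IOREMAP ranges come back zero-filled, and a zero return from vread() means the range overlaps no vmalloc area at all.

#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical /proc/kcore-style helper: snapshot @len bytes of vmalloc
 * space starting at @vm_addr into a freshly allocated kernel buffer.
 */
static char *snapshot_vmalloc_range(void *vm_addr, unsigned long len)
{
	char *dst = kzalloc(len, GFP_KERNEL);

	if (!dst)
		return NULL;

	/* vread() returns 0 only if the range hits no vmalloc'ed area at all. */
	if (!vread(dst, (char *)vm_addr, len)) {
		kfree(dst);
		return NULL;
	}
	return dst;
}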
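__vmalloc_node_range() is the primitive to reach for when a caller needs a specific VA window or protection, in the style of an architecture's module_alloc(). A sketch under the assumption that the architecture defines MODULES_VADDR, MODULES_END and PAGE_KERNEL_EXEC; it is not the actual x86 or arm64 implementation.

#include <linux/gfp.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>

/*
 * Illustrative module_alloc()-style wrapper: place the allocation inside the
 * architecture's module VA window with executable kernel protections.
 * MODULES_VADDR, MODULES_END and PAGE_KERNEL_EXEC are arch-provided symbols
 * and do not exist on every architecture.
 */
static void *example_module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				    NUMA_NO_NODE, __builtin_return_address(0));
}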