// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);
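/*
 * Context note (a sketch, not part of the original file): vfree() can be
 * called from contexts that must not sleep, e.g. interrupt context. The
 * per-CPU vfree_deferred machinery below queues such frees on a lock-free
 * llist and defers the actual __vunmap() to a workqueue, roughly:
 *
 *	if (llist_add((struct llist_node *)addr, &p->list))
 *		schedule_work(&p->wq);	// free_work() then runs __vunmap()
 *
 * The exact caller-side code lives elsewhere in vmalloc.c; treat the two
 * lines above as illustrative only.
 */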
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}
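/*
 * Worked example of the checks above (illustrative, hypothetical values):
 * with 2MB PMD mappings, a call covering exactly [va, va + PMD_SIZE)
 * where both va and phys are PMD_SIZE-aligned,
 *
 *	vmap_try_huge_pmd(pmd, va, va + PMD_SIZE, phys, prot, PMD_SHIFT);
 *
 * can install a single huge entry via pmd_set_huge(). If any size or
 * alignment check fails, the caller falls back to base-page PTEs.
 */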
static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}
static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}
int vmap_range(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
	flush_cache_vmap(addr, end);

	return err;
}
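/*
 * Usage sketch (illustrative; not a kerneldoc contract): an ioremap-style
 * caller maps a physically contiguous region, allowing huge entries up
 * to PMD size when size and alignment permit:
 *
 *	err = vmap_range(va, va + size, phys, PAGE_KERNEL, PMD_SHIFT);
 *
 * Passing PAGE_SHIFT as the last argument restricts the mapping to base
 * pages.
 */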
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;
	int cleared;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		cleared = p4d_clear_huge(p4d);
		if (cleared || p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (cleared)
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @start: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @start. The VM area that @start and @size
 * specify should have been allocated using get_vm_area() and its friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is responsible
 * for calling flush_cache_vunmap() on to-be-unmapped areas before calling
 * this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}
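/*
 * The NOTE above implies the following full sequence for a flushing
 * unmap (a sketch; callers wrap the noflush variant similarly):
 *
 *	flush_cache_vunmap(start, start + size);
 *	unmap_kernel_range_noflush(start, size);
 *	flush_tlb_kernel_range(start, start + size);
 */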
static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}
/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					__pa(page_address(pages[i])), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}
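/*
 * Usage sketch (illustrative): mapping an array of order-0 pages with
 * the base page size, as the small-page vmalloc path does:
 *
 *	err = vmap_pages_range(addr, addr + (nr << PAGE_SHIFT),
 *			       PAGE_KERNEL, pages, PAGE_SHIFT);
 *
 * With a bigger @page_shift, groups of 1 << (page_shift - PAGE_SHIFT)
 * entries in @pages must be physically contiguous and aligned, as the
 * kerneldoc above requires.
 */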
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
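/*
 * Example (illustrative): translating an offset inside a vmalloc'ed
 * buffer back to its backing page, e.g. when building a scatterlist:
 *
 *	struct page *page = vmalloc_to_page(buf + offset);
 *
 * Note that vmalloc_to_pfn() assumes the address is actually mapped;
 * vmalloc_to_page() returns NULL for unmapped addresses.
 */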

/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when the node is removed or the tree is rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}
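/*
 * Calling-convention sketch (the callers sit outside this excerpt):
 * __find_vmap_area() walks vmap_area_root without taking any lock
 * itself, so callers are expected to hold vmap_area_lock, roughly:
 *
 *	spin_lock(&vmap_area_lock);
 *	va = __find_vmap_area(addr);
 *	spin_unlock(&vmap_area_lock);
 */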
/*
 * This function returns the address of the parent node and its
 * left or right link, for further processing.
 *
 * Otherwise NULL is returned. In that case the insertion of a
 * conflicting, overlapping range must be declined; it is
 * considered a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction
	 * ("link") where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger the WARN() and bail out if partial (left/right
		 * side) or full overlaps are found.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}
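/*
 * Worked example (hypothetical addresses): inserting va = [0x3000, 0x4000)
 * into a tree whose root is [0x5000, 0x6000). Since va->va_end (0x4000) is
 * <= the root's va_start (0x5000), the walk descends left. A range such as
 * [0x5800, 0x6800) overlaps the root, so it would hit the WARN() above and
 * the insertion would be declined.
 */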
static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform a simple insertion
		 * into the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented(),
		 * because we populate the tree from the bottom up to the
		 * parent levels once the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything into
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}
static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif
/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end, or
 * when a new VA is inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from the bottom towards the root until
	 * the calculated maximum available size of a checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}
static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If no coalescing is done, a new free
 * area is inserted. If VA has been merged, it is freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Despite being buggy
 * behaviour, the system can stay alive and keep going.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 *                  start            end
	 *                  |                |
	 * |<-----Prev----->|<------VA------>|
	 * |                |
	 * start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with the "previous" one. Otherwise the tree might
			 * not be fully populated if a sibling's augmented
			 * value is "normalized" because of rotation operations.
			 */
			if (merged)
				unlink_va(va, root);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		link_va(va, root, parent, link, head);

	return va;
}
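/*
 * Worked example (hypothetical addresses): freeing [0x2000, 0x3000) while
 * the free tree already contains [0x1000, 0x2000) and [0x3000, 0x4000).
 * The first block above merges VA with the next sibling, producing
 * [0x2000, 0x4000); the second block then merges with the previous one,
 * leaving a single [0x1000, 0x4000) area and freeing two vmap_area
 * objects back to vmap_area_cachep.
 */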
Otherwise the tree might not be
10695dd78640SUladzislau Rezki (Sony) 			 * fully populated if a sibling's augmented value is
10705dd78640SUladzislau Rezki (Sony) 			 * "normalized" because of rotation operations.
10715dd78640SUladzislau Rezki (Sony) 			 */
107254f63d9dSUladzislau Rezki (Sony) 			if (merged)
107368ad4a33SUladzislau Rezki (Sony) 				unlink_va(va, root);
107468ad4a33SUladzislau Rezki (Sony) 
10755dd78640SUladzislau Rezki (Sony) 			sibling->va_end = va->va_end;
10765dd78640SUladzislau Rezki (Sony) 
107768ad4a33SUladzislau Rezki (Sony) 			/* Free vmap_area object. */
107868ad4a33SUladzislau Rezki (Sony) 			kmem_cache_free(vmap_area_cachep, va);
10793c5c3cfbSDaniel Axtens 
10803c5c3cfbSDaniel Axtens 			/* Point to the new merged area. */
10813c5c3cfbSDaniel Axtens 			va = sibling;
10823c5c3cfbSDaniel Axtens 			merged = true;
108368ad4a33SUladzislau Rezki (Sony) 		}
108468ad4a33SUladzislau Rezki (Sony) 	}
108568ad4a33SUladzislau Rezki (Sony) 
108668ad4a33SUladzislau Rezki (Sony) insert:
10875dd78640SUladzislau Rezki (Sony) 	if (!merged)
108868ad4a33SUladzislau Rezki (Sony) 		link_va(va, root, parent, link, head);
10893c5c3cfbSDaniel Axtens 
109096e2db45SUladzislau Rezki (Sony) 	return va;
109196e2db45SUladzislau Rezki (Sony) }
109296e2db45SUladzislau Rezki (Sony) 
109396e2db45SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
109496e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(struct vmap_area *va,
109596e2db45SUladzislau Rezki (Sony) 	struct rb_root *root, struct list_head *head)
109696e2db45SUladzislau Rezki (Sony) {
109796e2db45SUladzislau Rezki (Sony) 	va = merge_or_add_vmap_area(va, root, head);
109896e2db45SUladzislau Rezki (Sony) 	if (va)
10995dd78640SUladzislau Rezki (Sony) 		augment_tree_propagate_from(va);
110096e2db45SUladzislau Rezki (Sony) 
11013c5c3cfbSDaniel Axtens 	return va;
110268ad4a33SUladzislau Rezki (Sony) }
110368ad4a33SUladzislau Rezki (Sony) 
110468ad4a33SUladzislau Rezki (Sony) static __always_inline bool
110568ad4a33SUladzislau Rezki (Sony) is_within_this_va(struct vmap_area *va, unsigned long size,
110668ad4a33SUladzislau Rezki (Sony) 	unsigned long align, unsigned long vstart)
110768ad4a33SUladzislau Rezki (Sony) {
110868ad4a33SUladzislau Rezki (Sony) 	unsigned long nva_start_addr;
110968ad4a33SUladzislau Rezki (Sony) 
111068ad4a33SUladzislau Rezki (Sony) 	if (va->va_start > vstart)
111168ad4a33SUladzislau Rezki (Sony) 		nva_start_addr = ALIGN(va->va_start, align);
111268ad4a33SUladzislau Rezki (Sony) 	else
111368ad4a33SUladzislau Rezki (Sony) 		nva_start_addr = ALIGN(vstart, align);
111468ad4a33SUladzislau Rezki (Sony) 
111568ad4a33SUladzislau Rezki (Sony) 	/* Can overflow due to a big size or alignment. */
111668ad4a33SUladzislau Rezki (Sony) 	if (nva_start_addr + size < nva_start_addr ||
111768ad4a33SUladzislau Rezki (Sony) 			nva_start_addr < vstart)
111868ad4a33SUladzislau Rezki (Sony) 		return false;
111968ad4a33SUladzislau Rezki (Sony) 
112068ad4a33SUladzislau Rezki (Sony) 	return (nva_start_addr + size <= va->va_end);
112168ad4a33SUladzislau Rezki (Sony) }
112268ad4a33SUladzislau Rezki (Sony) 
112368ad4a33SUladzislau Rezki (Sony) /*
112468ad4a33SUladzislau Rezki (Sony)  * Find the first free block (lowest start address) in the tree
112568ad4a33SUladzislau Rezki (Sony)  * that can accommodate the request for the given size, align
112668ad4a33SUladzislau Rezki (Sony)  * and vstart parameters.
112768ad4a33SUladzislau Rezki (Sony)  */
112868ad4a33SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
112968ad4a33SUladzislau Rezki (Sony) find_vmap_lowest_match(unsigned long size,
113068ad4a33SUladzislau Rezki (Sony) 	unsigned long align, unsigned long vstart)
113168ad4a33SUladzislau Rezki (Sony) {
113268ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *va;
113368ad4a33SUladzislau Rezki (Sony) 	struct rb_node *node;
113468ad4a33SUladzislau Rezki (Sony) 	unsigned long length;
113568ad4a33SUladzislau Rezki (Sony) 
113668ad4a33SUladzislau Rezki (Sony) 	/* Start from the root. */
113768ad4a33SUladzislau Rezki (Sony) 	node = free_vmap_area_root.rb_node;
113868ad4a33SUladzislau Rezki (Sony) 
113968ad4a33SUladzislau Rezki (Sony) 	/* Adjust the search size for alignment overhead. */
114068ad4a33SUladzislau Rezki (Sony) 	length = size + align - 1;
114168ad4a33SUladzislau Rezki (Sony) 
114268ad4a33SUladzislau Rezki (Sony) 	while (node) {
114368ad4a33SUladzislau Rezki (Sony) 		va = rb_entry(node, struct vmap_area, rb_node);
114468ad4a33SUladzislau Rezki (Sony) 
114568ad4a33SUladzislau Rezki (Sony) 		if (get_subtree_max_size(node->rb_left) >= length &&
114668ad4a33SUladzislau Rezki (Sony) 				vstart < va->va_start) {
114768ad4a33SUladzislau Rezki (Sony) 			node = node->rb_left;
114868ad4a33SUladzislau Rezki (Sony) 		} else {
114968ad4a33SUladzislau Rezki (Sony) 			if (is_within_this_va(va, size, align, vstart))
115068ad4a33SUladzislau Rezki (Sony) 				return va;
115168ad4a33SUladzislau Rezki (Sony) 
115268ad4a33SUladzislau Rezki (Sony) 			/*
115368ad4a33SUladzislau Rezki (Sony) 			 * It does not make sense to go deeper into the right
115468ad4a33SUladzislau Rezki (Sony) 			 * sub-tree if it does not have a free block that is
115568ad4a33SUladzislau Rezki (Sony) 			 * equal to or bigger than the requested search length.
115668ad4a33SUladzislau Rezki (Sony) 			 */
115768ad4a33SUladzislau Rezki (Sony) 			if (get_subtree_max_size(node->rb_right) >= length) {
115868ad4a33SUladzislau Rezki (Sony) 				node = node->rb_right;
115968ad4a33SUladzislau Rezki (Sony) 				continue;
116068ad4a33SUladzislau Rezki (Sony) 			}
116168ad4a33SUladzislau Rezki (Sony) 
116268ad4a33SUladzislau Rezki (Sony) 			/*
11633806b041SAndrew Morton 			 * OK. We roll back and find the first right sub-tree
116468ad4a33SUladzislau Rezki (Sony) 			 * that will satisfy the search criteria. It can happen
116568ad4a33SUladzislau Rezki (Sony) 			 * only once due to the "vstart" restriction.
116668ad4a33SUladzislau Rezki (Sony) */ 116768ad4a33SUladzislau Rezki (Sony) while ((node = rb_parent(node))) { 116868ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node); 116968ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart)) 117068ad4a33SUladzislau Rezki (Sony) return va; 117168ad4a33SUladzislau Rezki (Sony) 117268ad4a33SUladzislau Rezki (Sony) if (get_subtree_max_size(node->rb_right) >= length && 117368ad4a33SUladzislau Rezki (Sony) vstart <= va->va_start) { 117468ad4a33SUladzislau Rezki (Sony) node = node->rb_right; 117568ad4a33SUladzislau Rezki (Sony) break; 117668ad4a33SUladzislau Rezki (Sony) } 117768ad4a33SUladzislau Rezki (Sony) } 117868ad4a33SUladzislau Rezki (Sony) } 117968ad4a33SUladzislau Rezki (Sony) } 118068ad4a33SUladzislau Rezki (Sony) 118168ad4a33SUladzislau Rezki (Sony) return NULL; 118268ad4a33SUladzislau Rezki (Sony) } 118368ad4a33SUladzislau Rezki (Sony) 1184a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1185a6cf4e0fSUladzislau Rezki (Sony) #include <linux/random.h> 1186a6cf4e0fSUladzislau Rezki (Sony) 1187a6cf4e0fSUladzislau Rezki (Sony) static struct vmap_area * 1188a6cf4e0fSUladzislau Rezki (Sony) find_vmap_lowest_linear_match(unsigned long size, 1189a6cf4e0fSUladzislau Rezki (Sony) unsigned long align, unsigned long vstart) 1190a6cf4e0fSUladzislau Rezki (Sony) { 1191a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va; 1192a6cf4e0fSUladzislau Rezki (Sony) 1193a6cf4e0fSUladzislau Rezki (Sony) list_for_each_entry(va, &free_vmap_area_list, list) { 1194a6cf4e0fSUladzislau Rezki (Sony) if (!is_within_this_va(va, size, align, vstart)) 1195a6cf4e0fSUladzislau Rezki (Sony) continue; 1196a6cf4e0fSUladzislau Rezki (Sony) 1197a6cf4e0fSUladzislau Rezki (Sony) return va; 1198a6cf4e0fSUladzislau Rezki (Sony) } 1199a6cf4e0fSUladzislau Rezki (Sony) 1200a6cf4e0fSUladzislau Rezki (Sony) return NULL; 1201a6cf4e0fSUladzislau Rezki (Sony) } 1202a6cf4e0fSUladzislau Rezki (Sony) 1203a6cf4e0fSUladzislau Rezki (Sony) static void 1204a6cf4e0fSUladzislau Rezki (Sony) find_vmap_lowest_match_check(unsigned long size) 1205a6cf4e0fSUladzislau Rezki (Sony) { 1206a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va_1, *va_2; 1207a6cf4e0fSUladzislau Rezki (Sony) unsigned long vstart; 1208a6cf4e0fSUladzislau Rezki (Sony) unsigned int rnd; 1209a6cf4e0fSUladzislau Rezki (Sony) 1210a6cf4e0fSUladzislau Rezki (Sony) get_random_bytes(&rnd, sizeof(rnd)); 1211a6cf4e0fSUladzislau Rezki (Sony) vstart = VMALLOC_START + rnd; 1212a6cf4e0fSUladzislau Rezki (Sony) 1213a6cf4e0fSUladzislau Rezki (Sony) va_1 = find_vmap_lowest_match(size, 1, vstart); 1214a6cf4e0fSUladzislau Rezki (Sony) va_2 = find_vmap_lowest_linear_match(size, 1, vstart); 1215a6cf4e0fSUladzislau Rezki (Sony) 1216a6cf4e0fSUladzislau Rezki (Sony) if (va_1 != va_2) 1217a6cf4e0fSUladzislau Rezki (Sony) pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", 1218a6cf4e0fSUladzislau Rezki (Sony) va_1, va_2, vstart); 1219a6cf4e0fSUladzislau Rezki (Sony) } 1220a6cf4e0fSUladzislau Rezki (Sony) #endif 1221a6cf4e0fSUladzislau Rezki (Sony) 122268ad4a33SUladzislau Rezki (Sony) enum fit_type { 122368ad4a33SUladzislau Rezki (Sony) NOTHING_FIT = 0, 122468ad4a33SUladzislau Rezki (Sony) FL_FIT_TYPE = 1, /* full fit */ 122568ad4a33SUladzislau Rezki (Sony) LE_FIT_TYPE = 2, /* left edge fit */ 122668ad4a33SUladzislau Rezki (Sony) RE_FIT_TYPE = 3, /* right edge fit */ 122768ad4a33SUladzislau Rezki (Sony) NE_FIT_TYPE = 4 /* no edge fit */ 122868ad4a33SUladzislau Rezki (Sony) }; 
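/*
 * A standalone userspace sketch of the classification above (illustrative
 * only, not part of this file): a request either consumes the whole free
 * area (FL), abuts its left or right edge (LE/RE), or splits it in the
 * middle (NE), mirroring what classify_va_fit_type() below decides.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

enum fit { NOTHING, FL, LE, RE, NE };

static enum fit classify(unsigned long start, unsigned long end,
			 unsigned long nva, unsigned long size)
{
	/* Reject a request that is not fully inside [start, end). */
	if (nva < start || nva + size > end)
		return NOTHING;
	if (start == nva)
		return (end == nva + size) ? FL : LE;
	return (end == nva + size) ? RE : NE;
}

int main(void)
{
	/* Carving [2, 3) out of [1, 4) touches neither edge: prints 4 (NE). */
	printf("%d\n", classify(1, 4, 2, 1));
	return 0;
}
#endif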
122968ad4a33SUladzislau Rezki (Sony) 123068ad4a33SUladzislau Rezki (Sony) static __always_inline enum fit_type 123168ad4a33SUladzislau Rezki (Sony) classify_va_fit_type(struct vmap_area *va, 123268ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr, unsigned long size) 123368ad4a33SUladzislau Rezki (Sony) { 123468ad4a33SUladzislau Rezki (Sony) enum fit_type type; 123568ad4a33SUladzislau Rezki (Sony) 123668ad4a33SUladzislau Rezki (Sony) /* Check if it is within VA. */ 123768ad4a33SUladzislau Rezki (Sony) if (nva_start_addr < va->va_start || 123868ad4a33SUladzislau Rezki (Sony) nva_start_addr + size > va->va_end) 123968ad4a33SUladzislau Rezki (Sony) return NOTHING_FIT; 124068ad4a33SUladzislau Rezki (Sony) 124168ad4a33SUladzislau Rezki (Sony) /* Now classify. */ 124268ad4a33SUladzislau Rezki (Sony) if (va->va_start == nva_start_addr) { 124368ad4a33SUladzislau Rezki (Sony) if (va->va_end == nva_start_addr + size) 124468ad4a33SUladzislau Rezki (Sony) type = FL_FIT_TYPE; 124568ad4a33SUladzislau Rezki (Sony) else 124668ad4a33SUladzislau Rezki (Sony) type = LE_FIT_TYPE; 124768ad4a33SUladzislau Rezki (Sony) } else if (va->va_end == nva_start_addr + size) { 124868ad4a33SUladzislau Rezki (Sony) type = RE_FIT_TYPE; 124968ad4a33SUladzislau Rezki (Sony) } else { 125068ad4a33SUladzislau Rezki (Sony) type = NE_FIT_TYPE; 125168ad4a33SUladzislau Rezki (Sony) } 125268ad4a33SUladzislau Rezki (Sony) 125368ad4a33SUladzislau Rezki (Sony) return type; 125468ad4a33SUladzislau Rezki (Sony) } 125568ad4a33SUladzislau Rezki (Sony) 125668ad4a33SUladzislau Rezki (Sony) static __always_inline int 125768ad4a33SUladzislau Rezki (Sony) adjust_va_to_fit_type(struct vmap_area *va, 125868ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr, unsigned long size, 125968ad4a33SUladzislau Rezki (Sony) enum fit_type type) 126068ad4a33SUladzislau Rezki (Sony) { 12612c929233SArnd Bergmann struct vmap_area *lva = NULL; 126268ad4a33SUladzislau Rezki (Sony) 126368ad4a33SUladzislau Rezki (Sony) if (type == FL_FIT_TYPE) { 126468ad4a33SUladzislau Rezki (Sony) /* 126568ad4a33SUladzislau Rezki (Sony) * No need to split VA, it fully fits. 126668ad4a33SUladzislau Rezki (Sony) * 126768ad4a33SUladzislau Rezki (Sony) * | | 126868ad4a33SUladzislau Rezki (Sony) * V NVA V 126968ad4a33SUladzislau Rezki (Sony) * |---------------| 127068ad4a33SUladzislau Rezki (Sony) */ 127168ad4a33SUladzislau Rezki (Sony) unlink_va(va, &free_vmap_area_root); 127268ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 127368ad4a33SUladzislau Rezki (Sony) } else if (type == LE_FIT_TYPE) { 127468ad4a33SUladzislau Rezki (Sony) /* 127568ad4a33SUladzislau Rezki (Sony) * Split left edge of fit VA. 127668ad4a33SUladzislau Rezki (Sony) * 127768ad4a33SUladzislau Rezki (Sony) * | | 127868ad4a33SUladzislau Rezki (Sony) * V NVA V R 127968ad4a33SUladzislau Rezki (Sony) * |-------|-------| 128068ad4a33SUladzislau Rezki (Sony) */ 128168ad4a33SUladzislau Rezki (Sony) va->va_start += size; 128268ad4a33SUladzislau Rezki (Sony) } else if (type == RE_FIT_TYPE) { 128368ad4a33SUladzislau Rezki (Sony) /* 128468ad4a33SUladzislau Rezki (Sony) * Split right edge of fit VA. 
128568ad4a33SUladzislau Rezki (Sony) 		 *
128668ad4a33SUladzislau Rezki (Sony) 		 * |       |
128768ad4a33SUladzislau Rezki (Sony) 		 * L   V      NVA      V
128868ad4a33SUladzislau Rezki (Sony) 		 * |-------|-------|
128968ad4a33SUladzislau Rezki (Sony) 		 */
129068ad4a33SUladzislau Rezki (Sony) 		va->va_end = nva_start_addr;
129168ad4a33SUladzislau Rezki (Sony) 	} else if (type == NE_FIT_TYPE) {
129268ad4a33SUladzislau Rezki (Sony) 		/*
129368ad4a33SUladzislau Rezki (Sony) 		 * Split no edge of fit VA.
129468ad4a33SUladzislau Rezki (Sony) 		 *
129568ad4a33SUladzislau Rezki (Sony) 		 * |       |
129668ad4a33SUladzislau Rezki (Sony) 		 * L   V  NVA  V R
129768ad4a33SUladzislau Rezki (Sony) 		 * |---|-------|---|
129868ad4a33SUladzislau Rezki (Sony) 		 */
129982dd23e8SUladzislau Rezki (Sony) 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
130082dd23e8SUladzislau Rezki (Sony) 		if (unlikely(!lva)) {
130182dd23e8SUladzislau Rezki (Sony) 			/*
130282dd23e8SUladzislau Rezki (Sony) 			 * For the percpu allocator we do not do any pre-allocation
130382dd23e8SUladzislau Rezki (Sony) 			 * and leave it as is. The reason is that it most likely
130482dd23e8SUladzislau Rezki (Sony) 			 * never ends up with NE_FIT_TYPE splitting. In the case of
130582dd23e8SUladzislau Rezki (Sony) 			 * percpu allocations, offsets and sizes are aligned to a
130682dd23e8SUladzislau Rezki (Sony) 			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
130782dd23e8SUladzislau Rezki (Sony) 			 * are its main fitting cases.
130882dd23e8SUladzislau Rezki (Sony) 			 *
130982dd23e8SUladzislau Rezki (Sony) 			 * There are a few exceptions though; for example, the
131082dd23e8SUladzislau Rezki (Sony) 			 * first allocation (early boot) when we have "one"
131182dd23e8SUladzislau Rezki (Sony) 			 * big free space that has to be split.
1312060650a2SUladzislau Rezki (Sony) 			 *
1313060650a2SUladzislau Rezki (Sony) 			 * Also we can hit this path in case of regular "vmap"
1314060650a2SUladzislau Rezki (Sony) 			 * allocations, if "this" current CPU was not preloaded.
1315060650a2SUladzislau Rezki (Sony) 			 * See the comment in alloc_vmap_area() for why. If so,
1316060650a2SUladzislau Rezki (Sony) 			 * GFP_NOWAIT is used instead to get an extra object for
1317060650a2SUladzislau Rezki (Sony) 			 * split purposes. That is rare and most of the time
1318060650a2SUladzislau Rezki (Sony) 			 * does not occur.
1319060650a2SUladzislau Rezki (Sony) 			 *
1320060650a2SUladzislau Rezki (Sony) 			 * What happens if an allocation fails? Basically, an
1321060650a2SUladzislau Rezki (Sony) 			 * "overflow" path is triggered to purge lazily freed
1322060650a2SUladzislau Rezki (Sony) 			 * areas to free some memory, then the "retry" path is
1323060650a2SUladzislau Rezki (Sony) 			 * triggered to repeat one more time. See more details
1324060650a2SUladzislau Rezki (Sony) 			 * in the alloc_vmap_area() function.
132582dd23e8SUladzislau Rezki (Sony) 			 */
132668ad4a33SUladzislau Rezki (Sony) 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
132782dd23e8SUladzislau Rezki (Sony) 			if (!lva)
132868ad4a33SUladzislau Rezki (Sony) 				return -1;
132982dd23e8SUladzislau Rezki (Sony) 		}
133068ad4a33SUladzislau Rezki (Sony) 
133168ad4a33SUladzislau Rezki (Sony) 		/*
133268ad4a33SUladzislau Rezki (Sony) 		 * Build the remainder.
133368ad4a33SUladzislau Rezki (Sony) 		 */
133468ad4a33SUladzislau Rezki (Sony) 		lva->va_start = va->va_start;
133568ad4a33SUladzislau Rezki (Sony) 		lva->va_end = nva_start_addr;
133668ad4a33SUladzislau Rezki (Sony) 
133768ad4a33SUladzislau Rezki (Sony) 		/*
133868ad4a33SUladzislau Rezki (Sony) 		 * Shrink this VA to remaining size.
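		 *
		 * (E.g. carving NVA = [2MB, 3MB) out of a free VA [1MB, 4MB)
		 * leaves "lva" holding [1MB, 2MB) while this VA shrinks to
		 * [3MB, 4MB).)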
133968ad4a33SUladzislau Rezki (Sony) 		 */
134068ad4a33SUladzislau Rezki (Sony) 		va->va_start = nva_start_addr + size;
134168ad4a33SUladzislau Rezki (Sony) 	} else {
134268ad4a33SUladzislau Rezki (Sony) 		return -1;
134368ad4a33SUladzislau Rezki (Sony) 	}
134468ad4a33SUladzislau Rezki (Sony) 
134568ad4a33SUladzislau Rezki (Sony) 	if (type != FL_FIT_TYPE) {
134668ad4a33SUladzislau Rezki (Sony) 		augment_tree_propagate_from(va);
134768ad4a33SUladzislau Rezki (Sony) 
13482c929233SArnd Bergmann 		if (lva)	/* type == NE_FIT_TYPE */
134968ad4a33SUladzislau Rezki (Sony) 			insert_vmap_area_augment(lva, &va->rb_node,
135068ad4a33SUladzislau Rezki (Sony) 				&free_vmap_area_root, &free_vmap_area_list);
135168ad4a33SUladzislau Rezki (Sony) 	}
135268ad4a33SUladzislau Rezki (Sony) 
135368ad4a33SUladzislau Rezki (Sony) 	return 0;
135468ad4a33SUladzislau Rezki (Sony) }
135568ad4a33SUladzislau Rezki (Sony) 
135668ad4a33SUladzislau Rezki (Sony) /*
135768ad4a33SUladzislau Rezki (Sony)  * Returns the start address of the newly allocated area on success.
135868ad4a33SUladzislau Rezki (Sony)  * Otherwise, vend is returned to indicate failure.
135968ad4a33SUladzislau Rezki (Sony)  */
136068ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
136168ad4a33SUladzislau Rezki (Sony) __alloc_vmap_area(unsigned long size, unsigned long align,
1362cacca6baSUladzislau Rezki (Sony) 	unsigned long vstart, unsigned long vend)
136368ad4a33SUladzislau Rezki (Sony) {
136468ad4a33SUladzislau Rezki (Sony) 	unsigned long nva_start_addr;
136568ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *va;
136668ad4a33SUladzislau Rezki (Sony) 	enum fit_type type;
136768ad4a33SUladzislau Rezki (Sony) 	int ret;
136868ad4a33SUladzislau Rezki (Sony) 
136968ad4a33SUladzislau Rezki (Sony) 	va = find_vmap_lowest_match(size, align, vstart);
137068ad4a33SUladzislau Rezki (Sony) 	if (unlikely(!va))
137168ad4a33SUladzislau Rezki (Sony) 		return vend;
137268ad4a33SUladzislau Rezki (Sony) 
137368ad4a33SUladzislau Rezki (Sony) 	if (va->va_start > vstart)
137468ad4a33SUladzislau Rezki (Sony) 		nva_start_addr = ALIGN(va->va_start, align);
137568ad4a33SUladzislau Rezki (Sony) 	else
137668ad4a33SUladzislau Rezki (Sony) 		nva_start_addr = ALIGN(vstart, align);
137768ad4a33SUladzislau Rezki (Sony) 
137868ad4a33SUladzislau Rezki (Sony) 	/* Check the "vend" restriction. */
137968ad4a33SUladzislau Rezki (Sony) 	if (nva_start_addr + size > vend)
138068ad4a33SUladzislau Rezki (Sony) 		return vend;
138168ad4a33SUladzislau Rezki (Sony) 
138268ad4a33SUladzislau Rezki (Sony) 	/* Classify what we have found. */
138368ad4a33SUladzislau Rezki (Sony) 	type = classify_va_fit_type(va, nva_start_addr, size);
138468ad4a33SUladzislau Rezki (Sony) 	if (WARN_ON_ONCE(type == NOTHING_FIT))
138568ad4a33SUladzislau Rezki (Sony) 		return vend;
138668ad4a33SUladzislau Rezki (Sony) 
138768ad4a33SUladzislau Rezki (Sony) 	/* Update the free vmap_area.
*/ 138868ad4a33SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(va, nva_start_addr, size, type); 138968ad4a33SUladzislau Rezki (Sony) if (ret) 139068ad4a33SUladzislau Rezki (Sony) return vend; 139168ad4a33SUladzislau Rezki (Sony) 1392a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1393a6cf4e0fSUladzislau Rezki (Sony) find_vmap_lowest_match_check(size); 1394a6cf4e0fSUladzislau Rezki (Sony) #endif 1395a6cf4e0fSUladzislau Rezki (Sony) 139668ad4a33SUladzislau Rezki (Sony) return nva_start_addr; 139768ad4a33SUladzislau Rezki (Sony) } 13984da56b99SChris Wilson 1399db64fe02SNick Piggin /* 1400d98c9e83SAndrey Ryabinin * Free a region of KVA allocated by alloc_vmap_area 1401d98c9e83SAndrey Ryabinin */ 1402d98c9e83SAndrey Ryabinin static void free_vmap_area(struct vmap_area *va) 1403d98c9e83SAndrey Ryabinin { 1404d98c9e83SAndrey Ryabinin /* 1405d98c9e83SAndrey Ryabinin * Remove from the busy tree/list. 1406d98c9e83SAndrey Ryabinin */ 1407d98c9e83SAndrey Ryabinin spin_lock(&vmap_area_lock); 1408d98c9e83SAndrey Ryabinin unlink_va(va, &vmap_area_root); 1409d98c9e83SAndrey Ryabinin spin_unlock(&vmap_area_lock); 1410d98c9e83SAndrey Ryabinin 1411d98c9e83SAndrey Ryabinin /* 1412d98c9e83SAndrey Ryabinin * Insert/Merge it back to the free tree/list. 1413d98c9e83SAndrey Ryabinin */ 1414d98c9e83SAndrey Ryabinin spin_lock(&free_vmap_area_lock); 141596e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); 1416d98c9e83SAndrey Ryabinin spin_unlock(&free_vmap_area_lock); 1417d98c9e83SAndrey Ryabinin } 1418d98c9e83SAndrey Ryabinin 1419d98c9e83SAndrey Ryabinin /* 1420db64fe02SNick Piggin * Allocate a region of KVA of the specified size and alignment, within the 1421db64fe02SNick Piggin * vstart and vend. 1422db64fe02SNick Piggin */ 1423db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size, 1424db64fe02SNick Piggin unsigned long align, 1425db64fe02SNick Piggin unsigned long vstart, unsigned long vend, 1426db64fe02SNick Piggin int node, gfp_t gfp_mask) 1427db64fe02SNick Piggin { 142882dd23e8SUladzislau Rezki (Sony) struct vmap_area *va, *pva; 14291da177e4SLinus Torvalds unsigned long addr; 1430db64fe02SNick Piggin int purged = 0; 1431d98c9e83SAndrey Ryabinin int ret; 1432db64fe02SNick Piggin 14337766970cSNick Piggin BUG_ON(!size); 1434891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 143589699605SNick Piggin BUG_ON(!is_power_of_2(align)); 1436db64fe02SNick Piggin 143768ad4a33SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized)) 143868ad4a33SUladzislau Rezki (Sony) return ERR_PTR(-EBUSY); 143968ad4a33SUladzislau Rezki (Sony) 14405803ed29SChristoph Hellwig might_sleep(); 1441f07116d7SUladzislau Rezki (Sony) gfp_mask = gfp_mask & GFP_RECLAIM_MASK; 14424da56b99SChris Wilson 1443f07116d7SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1444db64fe02SNick Piggin if (unlikely(!va)) 1445db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 1446db64fe02SNick Piggin 14477f88f88fSCatalin Marinas /* 14487f88f88fSCatalin Marinas * Only scan the relevant parts containing pointers to other objects 14497f88f88fSCatalin Marinas * to avoid false negatives. 14507f88f88fSCatalin Marinas */ 1451f07116d7SUladzislau Rezki (Sony) kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); 14527f88f88fSCatalin Marinas 1453db64fe02SNick Piggin retry: 145482dd23e8SUladzislau Rezki (Sony) /* 145581f1ba58SUladzislau Rezki (Sony) * Preload this CPU with one extra vmap_area object. 
It is used 145681f1ba58SUladzislau Rezki (Sony) * when fit type of free area is NE_FIT_TYPE. Please note, it 145781f1ba58SUladzislau Rezki (Sony) * does not guarantee that an allocation occurs on a CPU that 145881f1ba58SUladzislau Rezki (Sony) * is preloaded, instead we minimize the case when it is not. 145981f1ba58SUladzislau Rezki (Sony) * It can happen because of cpu migration, because there is a 146081f1ba58SUladzislau Rezki (Sony) * race until the below spinlock is taken. 146182dd23e8SUladzislau Rezki (Sony) * 146282dd23e8SUladzislau Rezki (Sony) * The preload is done in non-atomic context, thus it allows us 146382dd23e8SUladzislau Rezki (Sony) * to use more permissive allocation masks to be more stable under 146481f1ba58SUladzislau Rezki (Sony) * low memory condition and high memory pressure. In rare case, 146581f1ba58SUladzislau Rezki (Sony) * if not preloaded, GFP_NOWAIT is used. 146682dd23e8SUladzislau Rezki (Sony) * 146781f1ba58SUladzislau Rezki (Sony) * Set "pva" to NULL here, because of "retry" path. 146882dd23e8SUladzislau Rezki (Sony) */ 146981f1ba58SUladzislau Rezki (Sony) pva = NULL; 147082dd23e8SUladzislau Rezki (Sony) 147181f1ba58SUladzislau Rezki (Sony) if (!this_cpu_read(ne_fit_preload_node)) 147281f1ba58SUladzislau Rezki (Sony) /* 147381f1ba58SUladzislau Rezki (Sony) * Even if it fails we do not really care about that. 147481f1ba58SUladzislau Rezki (Sony) * Just proceed as it is. If needed "overflow" path 147581f1ba58SUladzislau Rezki (Sony) * will refill the cache we allocate from. 147681f1ba58SUladzislau Rezki (Sony) */ 1477f07116d7SUladzislau Rezki (Sony) pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 147882dd23e8SUladzislau Rezki (Sony) 1479e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 148081f1ba58SUladzislau Rezki (Sony) 148181f1ba58SUladzislau Rezki (Sony) if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) 148281f1ba58SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, pva); 148368ad4a33SUladzislau Rezki (Sony) 148489699605SNick Piggin /* 148568ad4a33SUladzislau Rezki (Sony) * If an allocation fails, the "vend" address is 148668ad4a33SUladzislau Rezki (Sony) * returned. Therefore trigger the overflow path. 
148789699605SNick Piggin */ 1488cacca6baSUladzislau Rezki (Sony) addr = __alloc_vmap_area(size, align, vstart, vend); 1489e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 1490e36176beSUladzislau Rezki (Sony) 149168ad4a33SUladzislau Rezki (Sony) if (unlikely(addr == vend)) 149289699605SNick Piggin goto overflow; 149389699605SNick Piggin 149489699605SNick Piggin va->va_start = addr; 149589699605SNick Piggin va->va_end = addr + size; 1496688fcbfcSPengfei Li va->vm = NULL; 149768ad4a33SUladzislau Rezki (Sony) 1498d98c9e83SAndrey Ryabinin 1499e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 1500e36176beSUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 150189699605SNick Piggin spin_unlock(&vmap_area_lock); 150289699605SNick Piggin 150361e16557SWang Xiaoqiang BUG_ON(!IS_ALIGNED(va->va_start, align)); 150489699605SNick Piggin BUG_ON(va->va_start < vstart); 150589699605SNick Piggin BUG_ON(va->va_end > vend); 150689699605SNick Piggin 1507d98c9e83SAndrey Ryabinin ret = kasan_populate_vmalloc(addr, size); 1508d98c9e83SAndrey Ryabinin if (ret) { 1509d98c9e83SAndrey Ryabinin free_vmap_area(va); 1510d98c9e83SAndrey Ryabinin return ERR_PTR(ret); 1511d98c9e83SAndrey Ryabinin } 1512d98c9e83SAndrey Ryabinin 151389699605SNick Piggin return va; 151489699605SNick Piggin 15157766970cSNick Piggin overflow: 1516db64fe02SNick Piggin if (!purged) { 1517db64fe02SNick Piggin purge_vmap_area_lazy(); 1518db64fe02SNick Piggin purged = 1; 1519db64fe02SNick Piggin goto retry; 1520db64fe02SNick Piggin } 15214da56b99SChris Wilson 15224da56b99SChris Wilson if (gfpflags_allow_blocking(gfp_mask)) { 15234da56b99SChris Wilson unsigned long freed = 0; 15244da56b99SChris Wilson blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 15254da56b99SChris Wilson if (freed > 0) { 15264da56b99SChris Wilson purged = 0; 15274da56b99SChris Wilson goto retry; 15284da56b99SChris Wilson } 15294da56b99SChris Wilson } 15304da56b99SChris Wilson 153103497d76SFlorian Fainelli if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 1532756a025fSJoe Perches pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 1533756a025fSJoe Perches size); 153468ad4a33SUladzislau Rezki (Sony) 153568ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 1536db64fe02SNick Piggin return ERR_PTR(-EBUSY); 1537db64fe02SNick Piggin } 1538db64fe02SNick Piggin 15394da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb) 15404da56b99SChris Wilson { 15414da56b99SChris Wilson return blocking_notifier_chain_register(&vmap_notify_list, nb); 15424da56b99SChris Wilson } 15434da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 15444da56b99SChris Wilson 15454da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb) 15464da56b99SChris Wilson { 15474da56b99SChris Wilson return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 15484da56b99SChris Wilson } 15494da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 15504da56b99SChris Wilson 1551db64fe02SNick Piggin /* 1552db64fe02SNick Piggin * lazy_max_pages is the maximum amount of virtual address space we gather up 1553db64fe02SNick Piggin * before attempting to purge with a TLB flush. 
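 *
 * (Illustrative arithmetic: with 4KiB pages and 4 online CPUs,
 * lazy_max_pages() below returns fls(4) * (32MB / 4KiB) = 3 * 8192 =
 * 24576 pages, i.e. roughly 96MiB of lazily freed VA is gathered
 * before a purge is attempted.)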
1554db64fe02SNick Piggin  *
1555db64fe02SNick Piggin  * There is a tradeoff here: a larger number will cover more kernel page tables
1556db64fe02SNick Piggin  * and take slightly longer to purge, but it will linearly reduce the number of
1557db64fe02SNick Piggin  * global TLB flushes that must be performed. It would seem natural to scale
1558db64fe02SNick Piggin  * this number up linearly with the number of CPUs (because vmapping activity
1559db64fe02SNick Piggin  * could also scale linearly with the number of CPUs), however it is likely
1560db64fe02SNick Piggin  * that in practice, workloads might be constrained in other ways that mean
1561db64fe02SNick Piggin  * vmap activity will not scale linearly with CPUs. Also, I want to be
1562db64fe02SNick Piggin  * conservative and not introduce a big latency on huge systems, so go with
1563db64fe02SNick Piggin  * a less aggressive log scale. It will still be an improvement over the old
1564db64fe02SNick Piggin  * code, and it will be simple to change the scale factor if we find that it
1565db64fe02SNick Piggin  * becomes a problem on bigger systems.
1566db64fe02SNick Piggin  */
1567db64fe02SNick Piggin static unsigned long lazy_max_pages(void)
1568db64fe02SNick Piggin {
1569db64fe02SNick Piggin 	unsigned int log;
1570db64fe02SNick Piggin 
1571db64fe02SNick Piggin 	log = fls(num_online_cpus());
1572db64fe02SNick Piggin 
1573db64fe02SNick Piggin 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1574db64fe02SNick Piggin }
1575db64fe02SNick Piggin 
15764d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1577db64fe02SNick Piggin 
15780574ecd1SChristoph Hellwig /*
15790574ecd1SChristoph Hellwig  * Serialize vmap purging. There is no actual critical section protected
15800574ecd1SChristoph Hellwig  * by this lock, but we want to avoid concurrent calls for performance
15810574ecd1SChristoph Hellwig  * reasons and to make pcpu_get_vm_areas() more deterministic.
15820574ecd1SChristoph Hellwig  */
1583f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock);
15840574ecd1SChristoph Hellwig 
158502b709dfSNick Piggin /* for per-CPU blocks */
158602b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void);
158702b709dfSNick Piggin 
1588db64fe02SNick Piggin /*
15893ee48b6aSCliff Wickman  * Called before a call to iounmap() if the caller wants the vm_area_structs
15903ee48b6aSCliff Wickman  * to be freed immediately.
15913ee48b6aSCliff Wickman  */
15923ee48b6aSCliff Wickman void set_iounmap_nonlazy(void)
15933ee48b6aSCliff Wickman {
15944d36e6f8SUladzislau Rezki (Sony) 	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
15953ee48b6aSCliff Wickman }
15963ee48b6aSCliff Wickman 
15973ee48b6aSCliff Wickman /*
1598db64fe02SNick Piggin  * Purges all lazily-freed vmap areas.
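 *
 * Returns false when there was nothing to purge; returns true once the
 * pending areas have been TLB-flushed and returned to the free tree/list.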
1599db64fe02SNick Piggin  */
16000574ecd1SChristoph Hellwig static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1601db64fe02SNick Piggin {
16024d36e6f8SUladzislau Rezki (Sony) 	unsigned long resched_threshold;
160396e2db45SUladzislau Rezki (Sony) 	struct list_head local_purge_list;
160496e2db45SUladzislau Rezki (Sony) 	struct vmap_area *va, *n_va;
1605db64fe02SNick Piggin 
16060574ecd1SChristoph Hellwig 	lockdep_assert_held(&vmap_purge_lock);
160702b709dfSNick Piggin 
160896e2db45SUladzislau Rezki (Sony) 	spin_lock(&purge_vmap_area_lock);
160996e2db45SUladzislau Rezki (Sony) 	purge_vmap_area_root = RB_ROOT;
161096e2db45SUladzislau Rezki (Sony) 	list_replace_init(&purge_vmap_area_list, &local_purge_list);
161196e2db45SUladzislau Rezki (Sony) 	spin_unlock(&purge_vmap_area_lock);
161296e2db45SUladzislau Rezki (Sony) 
161396e2db45SUladzislau Rezki (Sony) 	if (unlikely(list_empty(&local_purge_list)))
161468571be9SUladzislau Rezki (Sony) 		return false;
161568571be9SUladzislau Rezki (Sony) 
161696e2db45SUladzislau Rezki (Sony) 	start = min(start,
161796e2db45SUladzislau Rezki (Sony) 		list_first_entry(&local_purge_list,
161896e2db45SUladzislau Rezki (Sony) 			struct vmap_area, list)->va_start);
161996e2db45SUladzislau Rezki (Sony) 
162096e2db45SUladzislau Rezki (Sony) 	end = max(end,
162196e2db45SUladzislau Rezki (Sony) 		list_last_entry(&local_purge_list,
162296e2db45SUladzislau Rezki (Sony) 			struct vmap_area, list)->va_end);
1623db64fe02SNick Piggin 
16240574ecd1SChristoph Hellwig 	flush_tlb_kernel_range(start, end);
16254d36e6f8SUladzislau Rezki (Sony) 	resched_threshold = lazy_max_pages() << 1;
1626db64fe02SNick Piggin 
1627e36176beSUladzislau Rezki (Sony) 	spin_lock(&free_vmap_area_lock);
162896e2db45SUladzislau Rezki (Sony) 	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
16294d36e6f8SUladzislau Rezki (Sony) 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
16303c5c3cfbSDaniel Axtens 		unsigned long orig_start = va->va_start;
16313c5c3cfbSDaniel Axtens 		unsigned long orig_end = va->va_end;
1632763b218dSJoel Fernandes 
1633dd3b8353SUladzislau Rezki (Sony) 		/*
1634dd3b8353SUladzislau Rezki (Sony) 		 * Finally, insert or merge the lazily-freed area. It is
1635dd3b8353SUladzislau Rezki (Sony) 		 * detached and there is no need to "unlink" it from
1636dd3b8353SUladzislau Rezki (Sony) 		 * anything.
1637dd3b8353SUladzislau Rezki (Sony) 		 */
163896e2db45SUladzislau Rezki (Sony) 		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
16393c5c3cfbSDaniel Axtens 				&free_vmap_area_list);
16403c5c3cfbSDaniel Axtens 
16419c801f61SUladzislau Rezki (Sony) 		if (!va)
16429c801f61SUladzislau Rezki (Sony) 			continue;
16439c801f61SUladzislau Rezki (Sony) 
16443c5c3cfbSDaniel Axtens 		if (is_vmalloc_or_module_addr((void *)orig_start))
16453c5c3cfbSDaniel Axtens 			kasan_release_vmalloc(orig_start, orig_end,
16463c5c3cfbSDaniel Axtens 					      va->va_start, va->va_end);
1647dd3b8353SUladzislau Rezki (Sony) 
16484d36e6f8SUladzislau Rezki (Sony) 		atomic_long_sub(nr, &vmap_lazy_nr);
164968571be9SUladzislau Rezki (Sony) 
16504d36e6f8SUladzislau Rezki (Sony) 		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1651e36176beSUladzislau Rezki (Sony) 			cond_resched_lock(&free_vmap_area_lock);
1652763b218dSJoel Fernandes 	}
1653e36176beSUladzislau Rezki (Sony) 	spin_unlock(&free_vmap_area_lock);
16540574ecd1SChristoph Hellwig 	return true;
1655db64fe02SNick Piggin }
1656db64fe02SNick Piggin 
1657db64fe02SNick Piggin /*
1658496850e5SNick Piggin  * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1659496850e5SNick Piggin  * is already purging.
1660496850e5SNick Piggin  */
1661496850e5SNick Piggin static void try_purge_vmap_area_lazy(void)
1662496850e5SNick Piggin {
1663f9e09977SChristoph Hellwig 	if (mutex_trylock(&vmap_purge_lock)) {
16640574ecd1SChristoph Hellwig 		__purge_vmap_area_lazy(ULONG_MAX, 0);
1665f9e09977SChristoph Hellwig 		mutex_unlock(&vmap_purge_lock);
16660574ecd1SChristoph Hellwig 	}
1667496850e5SNick Piggin }
1668496850e5SNick Piggin 
1669496850e5SNick Piggin /*
1670db64fe02SNick Piggin  * Kick off a purge of the outstanding lazy areas.
1671db64fe02SNick Piggin  */
1672db64fe02SNick Piggin static void purge_vmap_area_lazy(void)
1673db64fe02SNick Piggin {
1674f9e09977SChristoph Hellwig 	mutex_lock(&vmap_purge_lock);
16750574ecd1SChristoph Hellwig 	purge_fragmented_blocks_allcpus();
16760574ecd1SChristoph Hellwig 	__purge_vmap_area_lazy(ULONG_MAX, 0);
1677f9e09977SChristoph Hellwig 	mutex_unlock(&vmap_purge_lock);
1678db64fe02SNick Piggin }
1679db64fe02SNick Piggin 
1680db64fe02SNick Piggin /*
168164141da5SJeremy Fitzhardinge  * Free a vmap area, the caller ensuring that the area has been unmapped
168264141da5SJeremy Fitzhardinge  * and that flush_cache_vunmap has been called for the correct range
168364141da5SJeremy Fitzhardinge  * previously.
1684db64fe02SNick Piggin  */
168564141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va)
1686db64fe02SNick Piggin {
16874d36e6f8SUladzislau Rezki (Sony) 	unsigned long nr_lazy;
168880c4bd7aSChris Wilson 
1689dd3b8353SUladzislau Rezki (Sony) 	spin_lock(&vmap_area_lock);
1690dd3b8353SUladzislau Rezki (Sony) 	unlink_va(va, &vmap_area_root);
1691dd3b8353SUladzislau Rezki (Sony) 	spin_unlock(&vmap_area_lock);
1692dd3b8353SUladzislau Rezki (Sony) 
16934d36e6f8SUladzislau Rezki (Sony) 	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
16944d36e6f8SUladzislau Rezki (Sony) 				PAGE_SHIFT, &vmap_lazy_nr);
169580c4bd7aSChris Wilson 
169696e2db45SUladzislau Rezki (Sony) 	/*
169796e2db45SUladzislau Rezki (Sony) 	 * Merge it into, or place it on, the purge tree/list.
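	 *
	 * (E.g. with 4KiB pages, lazily freeing a 64KiB area has just added
	 * 16 pages to vmap_lazy_nr via the atomic_long_add_return() above.)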
169896e2db45SUladzislau Rezki (Sony) */ 169996e2db45SUladzislau Rezki (Sony) spin_lock(&purge_vmap_area_lock); 170096e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area(va, 170196e2db45SUladzislau Rezki (Sony) &purge_vmap_area_root, &purge_vmap_area_list); 170296e2db45SUladzislau Rezki (Sony) spin_unlock(&purge_vmap_area_lock); 170380c4bd7aSChris Wilson 170496e2db45SUladzislau Rezki (Sony) /* After this point, we may free va at any time */ 170580c4bd7aSChris Wilson if (unlikely(nr_lazy > lazy_max_pages())) 1706496850e5SNick Piggin try_purge_vmap_area_lazy(); 1707db64fe02SNick Piggin } 1708db64fe02SNick Piggin 1709b29acbdcSNick Piggin /* 1710b29acbdcSNick Piggin * Free and unmap a vmap area 1711b29acbdcSNick Piggin */ 1712b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va) 1713b29acbdcSNick Piggin { 1714b29acbdcSNick Piggin flush_cache_vunmap(va->va_start, va->va_end); 1715855e57a1SChristoph Hellwig unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start); 17168e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 171782a2e924SChintan Pandya flush_tlb_kernel_range(va->va_start, va->va_end); 171882a2e924SChintan Pandya 1719c8eef01eSChristoph Hellwig free_vmap_area_noflush(va); 1720b29acbdcSNick Piggin } 1721b29acbdcSNick Piggin 1722db64fe02SNick Piggin static struct vmap_area *find_vmap_area(unsigned long addr) 1723db64fe02SNick Piggin { 1724db64fe02SNick Piggin struct vmap_area *va; 1725db64fe02SNick Piggin 1726db64fe02SNick Piggin spin_lock(&vmap_area_lock); 1727db64fe02SNick Piggin va = __find_vmap_area(addr); 1728db64fe02SNick Piggin spin_unlock(&vmap_area_lock); 1729db64fe02SNick Piggin 1730db64fe02SNick Piggin return va; 1731db64fe02SNick Piggin } 1732db64fe02SNick Piggin 1733db64fe02SNick Piggin /*** Per cpu kva allocator ***/ 1734db64fe02SNick Piggin 1735db64fe02SNick Piggin /* 1736db64fe02SNick Piggin * vmap space is limited especially on 32 bit architectures. Ensure there is 1737db64fe02SNick Piggin * room for at least 16 percpu vmap blocks per CPU. 1738db64fe02SNick Piggin */ 1739db64fe02SNick Piggin /* 1740db64fe02SNick Piggin * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 1741db64fe02SNick Piggin * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 1742db64fe02SNick Piggin * instead (we just need a rough idea) 1743db64fe02SNick Piggin */ 1744db64fe02SNick Piggin #if BITS_PER_LONG == 32 1745db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024) 1746db64fe02SNick Piggin #else 1747db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024*1024) 1748db64fe02SNick Piggin #endif 1749db64fe02SNick Piggin 1750db64fe02SNick Piggin #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 1751db64fe02SNick Piggin #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 1752db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 1753db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 1754db64fe02SNick Piggin #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 1755db64fe02SNick Piggin #define VMAP_MAX(x, y) ((x) > (y) ? 
(x) : (y)) /* can't use max() */ 1756f982f915SClemens Ladisch #define VMAP_BBMAP_BITS \ 1757f982f915SClemens Ladisch VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 1758db64fe02SNick Piggin VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 1759f982f915SClemens Ladisch VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 1760db64fe02SNick Piggin 1761db64fe02SNick Piggin #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 1762db64fe02SNick Piggin 1763db64fe02SNick Piggin struct vmap_block_queue { 1764db64fe02SNick Piggin spinlock_t lock; 1765db64fe02SNick Piggin struct list_head free; 1766db64fe02SNick Piggin }; 1767db64fe02SNick Piggin 1768db64fe02SNick Piggin struct vmap_block { 1769db64fe02SNick Piggin spinlock_t lock; 1770db64fe02SNick Piggin struct vmap_area *va; 1771db64fe02SNick Piggin unsigned long free, dirty; 17727d61bfe8SRoman Pen unsigned long dirty_min, dirty_max; /*< dirty range */ 1773db64fe02SNick Piggin struct list_head free_list; 1774db64fe02SNick Piggin struct rcu_head rcu_head; 177502b709dfSNick Piggin struct list_head purge; 1776db64fe02SNick Piggin }; 1777db64fe02SNick Piggin 1778db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 1779db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 1780db64fe02SNick Piggin 1781db64fe02SNick Piggin /* 17820f14599cSMatthew Wilcox (Oracle) * XArray of vmap blocks, indexed by address, to quickly find a vmap block 1783db64fe02SNick Piggin * in the free path. Could get rid of this if we change the API to return a 1784db64fe02SNick Piggin * "cookie" from alloc, to be passed to free. But no big deal yet. 1785db64fe02SNick Piggin */ 17860f14599cSMatthew Wilcox (Oracle) static DEFINE_XARRAY(vmap_blocks); 1787db64fe02SNick Piggin 1788db64fe02SNick Piggin /* 1789db64fe02SNick Piggin * We should probably have a fallback mechanism to allocate virtual memory 1790db64fe02SNick Piggin * out of partially filled vmap blocks. However vmap block sizing should be 1791db64fe02SNick Piggin * fairly reasonable according to the vmalloc size, so it shouldn't be a 1792db64fe02SNick Piggin * big problem. 1793db64fe02SNick Piggin */ 1794db64fe02SNick Piggin 1795db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr) 1796db64fe02SNick Piggin { 1797db64fe02SNick Piggin addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 1798db64fe02SNick Piggin addr /= VMAP_BLOCK_SIZE; 1799db64fe02SNick Piggin return addr; 1800db64fe02SNick Piggin } 1801db64fe02SNick Piggin 1802cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 1803cf725ce2SRoman Pen { 1804cf725ce2SRoman Pen unsigned long addr; 1805cf725ce2SRoman Pen 1806cf725ce2SRoman Pen addr = va_start + (pages_off << PAGE_SHIFT); 1807cf725ce2SRoman Pen BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 1808cf725ce2SRoman Pen return (void *)addr; 1809cf725ce2SRoman Pen } 1810cf725ce2SRoman Pen 1811cf725ce2SRoman Pen /** 1812cf725ce2SRoman Pen * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 1813cf725ce2SRoman Pen * block. 
Of course the number of pages can't exceed VMAP_BBMAP_BITS
1814cf725ce2SRoman Pen  * @order: how many 2^order pages should be occupied in the newly allocated block
1815cf725ce2SRoman Pen  * @gfp_mask: flags for the page level allocator
1816cf725ce2SRoman Pen  *
1817a862f68aSMike Rapoport  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1818cf725ce2SRoman Pen  */
1819cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1820db64fe02SNick Piggin {
1821db64fe02SNick Piggin 	struct vmap_block_queue *vbq;
1822db64fe02SNick Piggin 	struct vmap_block *vb;
1823db64fe02SNick Piggin 	struct vmap_area *va;
1824db64fe02SNick Piggin 	unsigned long vb_idx;
1825db64fe02SNick Piggin 	int node, err;
1826cf725ce2SRoman Pen 	void *vaddr;
1827db64fe02SNick Piggin 
1828db64fe02SNick Piggin 	node = numa_node_id();
1829db64fe02SNick Piggin 
1830db64fe02SNick Piggin 	vb = kmalloc_node(sizeof(struct vmap_block),
1831db64fe02SNick Piggin 			gfp_mask & GFP_RECLAIM_MASK, node);
1832db64fe02SNick Piggin 	if (unlikely(!vb))
1833db64fe02SNick Piggin 		return ERR_PTR(-ENOMEM);
1834db64fe02SNick Piggin 
1835db64fe02SNick Piggin 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1836db64fe02SNick Piggin 					VMALLOC_START, VMALLOC_END,
1837db64fe02SNick Piggin 					node, gfp_mask);
1838ddf9c6d4STobias Klauser 	if (IS_ERR(va)) {
1839db64fe02SNick Piggin 		kfree(vb);
1840e7d86340SJulia Lawall 		return ERR_CAST(va);
1841db64fe02SNick Piggin 	}
1842db64fe02SNick Piggin 
1843cf725ce2SRoman Pen 	vaddr = vmap_block_vaddr(va->va_start, 0);
1844db64fe02SNick Piggin 	spin_lock_init(&vb->lock);
1845db64fe02SNick Piggin 	vb->va = va;
1846cf725ce2SRoman Pen 	/* At least something should be left free */
1847cf725ce2SRoman Pen 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1848cf725ce2SRoman Pen 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1849db64fe02SNick Piggin 	vb->dirty = 0;
18507d61bfe8SRoman Pen 	vb->dirty_min = VMAP_BBMAP_BITS;
18517d61bfe8SRoman Pen 	vb->dirty_max = 0;
1852db64fe02SNick Piggin 	INIT_LIST_HEAD(&vb->free_list);
1853db64fe02SNick Piggin 
1854db64fe02SNick Piggin 	vb_idx = addr_to_vb_idx(va->va_start);
18550f14599cSMatthew Wilcox (Oracle) 	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
18560f14599cSMatthew Wilcox (Oracle) 	if (err) {
18570f14599cSMatthew Wilcox (Oracle) 		kfree(vb);
18580f14599cSMatthew Wilcox (Oracle) 		free_vmap_area(va);
18590f14599cSMatthew Wilcox (Oracle) 		return ERR_PTR(err);
18600f14599cSMatthew Wilcox (Oracle) 	}
1861db64fe02SNick Piggin 
1862db64fe02SNick Piggin 	vbq = &get_cpu_var(vmap_block_queue);
1863db64fe02SNick Piggin 	spin_lock(&vbq->lock);
186468ac546fSRoman Pen 	list_add_tail_rcu(&vb->free_list, &vbq->free);
1865db64fe02SNick Piggin 	spin_unlock(&vbq->lock);
18663f04ba85STejun Heo 	put_cpu_var(vmap_block_queue);
1867db64fe02SNick Piggin 
1868cf725ce2SRoman Pen 	return vaddr;
1869db64fe02SNick Piggin }
1870db64fe02SNick Piggin 
1871db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb)
1872db64fe02SNick Piggin {
1873db64fe02SNick Piggin 	struct vmap_block *tmp;
1874db64fe02SNick Piggin 
18750f14599cSMatthew Wilcox (Oracle) 	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1876db64fe02SNick Piggin 	BUG_ON(tmp != vb);
1877db64fe02SNick Piggin 
187864141da5SJeremy Fitzhardinge 	free_vmap_area_noflush(vb->va);
187922a3c7d1SLai Jiangshan 	kfree_rcu(vb, rcu_head);
1880db64fe02SNick Piggin }
1881db64fe02SNick Piggin 
188202b709dfSNick Piggin static void purge_fragmented_blocks(int cpu)
188302b709dfSNick Piggin {
188402b709dfSNick Piggin 	LIST_HEAD(purge);
188502b709dfSNick Piggin 	struct vmap_block *vb;
188602b709dfSNick Piggin 	struct vmap_block *n_vb;
188702b709dfSNick Piggin 	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
188802b709dfSNick Piggin 
188902b709dfSNick Piggin 	rcu_read_lock();
189002b709dfSNick Piggin 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
189102b709dfSNick Piggin 
189202b709dfSNick Piggin 		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
189302b709dfSNick Piggin 			continue;
189402b709dfSNick Piggin 
189502b709dfSNick Piggin 		spin_lock(&vb->lock);
189602b709dfSNick Piggin 		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
189702b709dfSNick Piggin 			vb->free = 0; /* prevent further allocs after releasing lock */
189802b709dfSNick Piggin 			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
18997d61bfe8SRoman Pen 			vb->dirty_min = 0;
19007d61bfe8SRoman Pen 			vb->dirty_max = VMAP_BBMAP_BITS;
190102b709dfSNick Piggin 			spin_lock(&vbq->lock);
190202b709dfSNick Piggin 			list_del_rcu(&vb->free_list);
190302b709dfSNick Piggin 			spin_unlock(&vbq->lock);
190402b709dfSNick Piggin 			spin_unlock(&vb->lock);
190502b709dfSNick Piggin 			list_add_tail(&vb->purge, &purge);
190602b709dfSNick Piggin 		} else
190702b709dfSNick Piggin 			spin_unlock(&vb->lock);
190802b709dfSNick Piggin 	}
190902b709dfSNick Piggin 	rcu_read_unlock();
191002b709dfSNick Piggin 
191102b709dfSNick Piggin 	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
191202b709dfSNick Piggin 		list_del(&vb->purge);
191302b709dfSNick Piggin 		free_vmap_block(vb);
191402b709dfSNick Piggin 	}
191502b709dfSNick Piggin }
191602b709dfSNick Piggin 
191702b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void)
191802b709dfSNick Piggin {
191902b709dfSNick Piggin 	int cpu;
192002b709dfSNick Piggin 
192102b709dfSNick Piggin 	for_each_possible_cpu(cpu)
192202b709dfSNick Piggin 		purge_fragmented_blocks(cpu);
192302b709dfSNick Piggin }
192402b709dfSNick Piggin 
1925db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1926db64fe02SNick Piggin {
1927db64fe02SNick Piggin 	struct vmap_block_queue *vbq;
1928db64fe02SNick Piggin 	struct vmap_block *vb;
1929cf725ce2SRoman Pen 	void *vaddr = NULL;
1930db64fe02SNick Piggin 	unsigned int order;
1931db64fe02SNick Piggin 
1932891c49abSAlexander Kuleshov 	BUG_ON(offset_in_page(size));
1933db64fe02SNick Piggin 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1934aa91c4d8SJan Kara 	if (WARN_ON(size == 0)) {
1935aa91c4d8SJan Kara 		/*
1936aa91c4d8SJan Kara 		 * Allocating 0 bytes isn't what the caller wants since
1937aa91c4d8SJan Kara 		 * get_order(0) returns a funny result. Just warn and terminate
1938aa91c4d8SJan Kara 		 * early.
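		 * (With 4KiB pages on a 64-bit build, get_order(0) works out
		 * to BITS_PER_LONG - PAGE_SHIFT = 52, far beyond any sane
		 * order.)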
1939aa91c4d8SJan Kara */ 1940aa91c4d8SJan Kara return NULL; 1941aa91c4d8SJan Kara } 1942db64fe02SNick Piggin order = get_order(size); 1943db64fe02SNick Piggin 1944db64fe02SNick Piggin rcu_read_lock(); 1945db64fe02SNick Piggin vbq = &get_cpu_var(vmap_block_queue); 1946db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1947cf725ce2SRoman Pen unsigned long pages_off; 1948db64fe02SNick Piggin 1949db64fe02SNick Piggin spin_lock(&vb->lock); 1950cf725ce2SRoman Pen if (vb->free < (1UL << order)) { 1951cf725ce2SRoman Pen spin_unlock(&vb->lock); 1952cf725ce2SRoman Pen continue; 1953cf725ce2SRoman Pen } 195402b709dfSNick Piggin 1955cf725ce2SRoman Pen pages_off = VMAP_BBMAP_BITS - vb->free; 1956cf725ce2SRoman Pen vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 1957db64fe02SNick Piggin vb->free -= 1UL << order; 1958db64fe02SNick Piggin if (vb->free == 0) { 1959db64fe02SNick Piggin spin_lock(&vbq->lock); 1960de560423SNick Piggin list_del_rcu(&vb->free_list); 1961db64fe02SNick Piggin spin_unlock(&vbq->lock); 1962db64fe02SNick Piggin } 1963cf725ce2SRoman Pen 1964db64fe02SNick Piggin spin_unlock(&vb->lock); 1965db64fe02SNick Piggin break; 1966db64fe02SNick Piggin } 196702b709dfSNick Piggin 19683f04ba85STejun Heo put_cpu_var(vmap_block_queue); 1969db64fe02SNick Piggin rcu_read_unlock(); 1970db64fe02SNick Piggin 1971cf725ce2SRoman Pen /* Allocate new block if nothing was found */ 1972cf725ce2SRoman Pen if (!vaddr) 1973cf725ce2SRoman Pen vaddr = new_vmap_block(order, gfp_mask); 1974db64fe02SNick Piggin 1975cf725ce2SRoman Pen return vaddr; 1976db64fe02SNick Piggin } 1977db64fe02SNick Piggin 197878a0e8c4SChristoph Hellwig static void vb_free(unsigned long addr, unsigned long size) 1979db64fe02SNick Piggin { 1980db64fe02SNick Piggin unsigned long offset; 1981db64fe02SNick Piggin unsigned int order; 1982db64fe02SNick Piggin struct vmap_block *vb; 1983db64fe02SNick Piggin 1984891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 1985db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 1986b29acbdcSNick Piggin 198778a0e8c4SChristoph Hellwig flush_cache_vunmap(addr, addr + size); 1988b29acbdcSNick Piggin 1989db64fe02SNick Piggin order = get_order(size); 199078a0e8c4SChristoph Hellwig offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 19910f14599cSMatthew Wilcox (Oracle) vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr)); 1992db64fe02SNick Piggin 1993b521c43fSChristoph Hellwig unmap_kernel_range_noflush(addr, size); 199464141da5SJeremy Fitzhardinge 19958e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 199678a0e8c4SChristoph Hellwig flush_tlb_kernel_range(addr, addr + size); 199782a2e924SChintan Pandya 1998db64fe02SNick Piggin spin_lock(&vb->lock); 19997d61bfe8SRoman Pen 20007d61bfe8SRoman Pen /* Expand dirty range */ 20017d61bfe8SRoman Pen vb->dirty_min = min(vb->dirty_min, offset); 20027d61bfe8SRoman Pen vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 2003d086817dSMinChan Kim 2004db64fe02SNick Piggin vb->dirty += 1UL << order; 2005db64fe02SNick Piggin if (vb->dirty == VMAP_BBMAP_BITS) { 2006de560423SNick Piggin BUG_ON(vb->free); 2007db64fe02SNick Piggin spin_unlock(&vb->lock); 2008db64fe02SNick Piggin free_vmap_block(vb); 2009db64fe02SNick Piggin } else 2010db64fe02SNick Piggin spin_unlock(&vb->lock); 2011db64fe02SNick Piggin } 2012db64fe02SNick Piggin 2013868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 2014db64fe02SNick Piggin { 2015db64fe02SNick Piggin int cpu; 2016db64fe02SNick Piggin 
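	/*
	 * Walk every CPU's free vmap blocks, widening [start, end) to cover
	 * any dirty ranges that still need flushing, then purge all lazily
	 * freed areas so the whole range is flushed at most once.
	 */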
20179b463334SJeremy Fitzhardinge 	if (unlikely(!vmap_initialized))
20189b463334SJeremy Fitzhardinge 		return;
20199b463334SJeremy Fitzhardinge 
20205803ed29SChristoph Hellwig 	might_sleep();
20215803ed29SChristoph Hellwig 
2022db64fe02SNick Piggin 	for_each_possible_cpu(cpu) {
2023db64fe02SNick Piggin 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2024db64fe02SNick Piggin 		struct vmap_block *vb;
2025db64fe02SNick Piggin 
2026db64fe02SNick Piggin 		rcu_read_lock();
2027db64fe02SNick Piggin 		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2028db64fe02SNick Piggin 			spin_lock(&vb->lock);
20297d61bfe8SRoman Pen 			if (vb->dirty) {
20307d61bfe8SRoman Pen 				unsigned long va_start = vb->va->va_start;
2031db64fe02SNick Piggin 				unsigned long s, e;
2032b136be5eSJoonsoo Kim 
20337d61bfe8SRoman Pen 				s = va_start + (vb->dirty_min << PAGE_SHIFT);
20347d61bfe8SRoman Pen 				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2035db64fe02SNick Piggin 
20367d61bfe8SRoman Pen 				start = min(s, start);
20377d61bfe8SRoman Pen 				end = max(e, end);
20387d61bfe8SRoman Pen 
2039db64fe02SNick Piggin 				flush = 1;
2040db64fe02SNick Piggin 			}
2041db64fe02SNick Piggin 			spin_unlock(&vb->lock);
2042db64fe02SNick Piggin 		}
2043db64fe02SNick Piggin 		rcu_read_unlock();
2044db64fe02SNick Piggin 	}
2045db64fe02SNick Piggin 
2046f9e09977SChristoph Hellwig 	mutex_lock(&vmap_purge_lock);
20470574ecd1SChristoph Hellwig 	purge_fragmented_blocks_allcpus();
20480574ecd1SChristoph Hellwig 	if (!__purge_vmap_area_lazy(start, end) && flush)
20490574ecd1SChristoph Hellwig 		flush_tlb_kernel_range(start, end);
2050f9e09977SChristoph Hellwig 	mutex_unlock(&vmap_purge_lock);
2051db64fe02SNick Piggin }
2052868b104dSRick Edgecombe 
2053868b104dSRick Edgecombe /**
2054868b104dSRick Edgecombe  * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2055868b104dSRick Edgecombe  *
2056868b104dSRick Edgecombe  * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2057868b104dSRick Edgecombe  * to amortize TLB flushing overheads. What this means is that any page you
2058868b104dSRick Edgecombe  * have now may, in a former life, have been mapped into a kernel virtual
2059868b104dSRick Edgecombe  * address by the vmap layer, and so there might be some CPUs with TLB entries
2060868b104dSRick Edgecombe  * still referencing that page (in addition to the regular 1:1 kernel mapping).
2061868b104dSRick Edgecombe  *
2062868b104dSRick Edgecombe  * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2063868b104dSRick Edgecombe  * be sure that none of the pages we have control over will have any aliases
2064868b104dSRick Edgecombe  * from the vmap layer.
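 *
 * (For example, code that changes direct-map permissions, such as the
 * x86 set_memory_*() implementation, calls this first so that no stale
 * vmap alias keeps the old protections reachable.)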
2065868b104dSRick Edgecombe  */
2066868b104dSRick Edgecombe void vm_unmap_aliases(void)
2067868b104dSRick Edgecombe {
2068868b104dSRick Edgecombe 	unsigned long start = ULONG_MAX, end = 0;
2069868b104dSRick Edgecombe 	int flush = 0;
2070868b104dSRick Edgecombe 
2071868b104dSRick Edgecombe 	_vm_unmap_aliases(start, end, flush);
2072868b104dSRick Edgecombe }
2073db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2074db64fe02SNick Piggin 
2075db64fe02SNick Piggin /**
2076db64fe02SNick Piggin  * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2077db64fe02SNick Piggin  * @mem: the pointer returned by vm_map_ram
2078db64fe02SNick Piggin  * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2079db64fe02SNick Piggin  */
2080db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count)
2081db64fe02SNick Piggin {
208265ee03c4SGuillermo Julián Moreno 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2083db64fe02SNick Piggin 	unsigned long addr = (unsigned long)mem;
20849c3acf60SChristoph Hellwig 	struct vmap_area *va;
2085db64fe02SNick Piggin 
20865803ed29SChristoph Hellwig 	might_sleep();
2087db64fe02SNick Piggin 	BUG_ON(!addr);
2088db64fe02SNick Piggin 	BUG_ON(addr < VMALLOC_START);
2089db64fe02SNick Piggin 	BUG_ON(addr > VMALLOC_END);
2090a1c0b1a0SShawn Lin 	BUG_ON(!PAGE_ALIGNED(addr));
2091db64fe02SNick Piggin 
2092d98c9e83SAndrey Ryabinin 	kasan_poison_vmalloc(mem, size);
2093d98c9e83SAndrey Ryabinin 
20949c3acf60SChristoph Hellwig 	if (likely(count <= VMAP_MAX_ALLOC)) {
209505e3ff95SChintan Pandya 		debug_check_no_locks_freed(mem, size);
209678a0e8c4SChristoph Hellwig 		vb_free(addr, size);
20979c3acf60SChristoph Hellwig 		return;
20989c3acf60SChristoph Hellwig 	}
20999c3acf60SChristoph Hellwig 
21009c3acf60SChristoph Hellwig 	va = find_vmap_area(addr);
21019c3acf60SChristoph Hellwig 	BUG_ON(!va);
210205e3ff95SChintan Pandya 	debug_check_no_locks_freed((void *)va->va_start,
210305e3ff95SChintan Pandya 				    (va->va_end - va->va_start));
21049c3acf60SChristoph Hellwig 	free_unmap_vmap_area(va);
2105db64fe02SNick Piggin }
2106db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram);
2107db64fe02SNick Piggin 
2108db64fe02SNick Piggin /**
2109db64fe02SNick Piggin  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2110db64fe02SNick Piggin  * @pages: an array of pointers to the pages to be mapped
2111db64fe02SNick Piggin  * @count: number of pages
2112db64fe02SNick Piggin  * @node: prefer to allocate data structures on this node
2113e99c97adSRandy Dunlap  *
211436437638SGioh Kim  * If you use this function for fewer than VMAP_MAX_ALLOC pages, it could be
211536437638SGioh Kim  * faster than vmap(), so it's good. But if you mix long-lived and short-lived
211636437638SGioh Kim  * objects with vm_map_ram(), it could consume lots of address space through
211736437638SGioh Kim  * fragmentation (especially on a 32-bit machine), and you could eventually
211836437638SGioh Kim  * see failures. Please use this function only for short-lived objects.
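 *
 * A minimal illustrative usage (error handling elided):
 *
 *	void *p = vm_map_ram(pages, count, NUMA_NO_NODE);
 *	if (p) {
 *		memcpy(p, src, (size_t)count << PAGE_SHIFT);
 *		vm_unmap_ram(p, count);
 *	}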
211936437638SGioh Kim * 2120e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure 2121db64fe02SNick Piggin */ 2122d4efd79aSChristoph Hellwig void *vm_map_ram(struct page **pages, unsigned int count, int node) 2123db64fe02SNick Piggin { 212465ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 2125db64fe02SNick Piggin unsigned long addr; 2126db64fe02SNick Piggin void *mem; 2127db64fe02SNick Piggin 2128db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) { 2129db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL); 2130db64fe02SNick Piggin if (IS_ERR(mem)) 2131db64fe02SNick Piggin return NULL; 2132db64fe02SNick Piggin addr = (unsigned long)mem; 2133db64fe02SNick Piggin } else { 2134db64fe02SNick Piggin struct vmap_area *va; 2135db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE, 2136db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); 2137db64fe02SNick Piggin if (IS_ERR(va)) 2138db64fe02SNick Piggin return NULL; 2139db64fe02SNick Piggin 2140db64fe02SNick Piggin addr = va->va_start; 2141db64fe02SNick Piggin mem = (void *)addr; 2142db64fe02SNick Piggin } 2143d98c9e83SAndrey Ryabinin 2144d98c9e83SAndrey Ryabinin kasan_unpoison_vmalloc(mem, size); 2145d98c9e83SAndrey Ryabinin 2146*b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, 2147*b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) { 2148db64fe02SNick Piggin vm_unmap_ram(mem, count); 2149db64fe02SNick Piggin return NULL; 2150db64fe02SNick Piggin } 2151*b67177ecSNicholas Piggin 2152db64fe02SNick Piggin return mem; 2153db64fe02SNick Piggin } 2154db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram); 2155db64fe02SNick Piggin 21564341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata; 215792eac168SMike Rapoport 2158121e6f32SNicholas Piggin static inline unsigned int vm_area_page_order(struct vm_struct *vm) 2159121e6f32SNicholas Piggin { 2160121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2161121e6f32SNicholas Piggin return vm->page_order; 2162121e6f32SNicholas Piggin #else 2163121e6f32SNicholas Piggin return 0; 2164121e6f32SNicholas Piggin #endif 2165121e6f32SNicholas Piggin } 2166121e6f32SNicholas Piggin 2167121e6f32SNicholas Piggin static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) 2168121e6f32SNicholas Piggin { 2169121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2170121e6f32SNicholas Piggin vm->page_order = order; 2171121e6f32SNicholas Piggin #else 2172121e6f32SNicholas Piggin BUG_ON(order != 0); 2173121e6f32SNicholas Piggin #endif 2174121e6f32SNicholas Piggin } 2175121e6f32SNicholas Piggin 2176f0aa6617STejun Heo /** 2177be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot 2178be9b7335SNicolas Pitre * @vm: vm_struct to add 2179be9b7335SNicolas Pitre * 2180be9b7335SNicolas Pitre * This function is used to add fixed kernel vm area to vmlist before 2181be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 2182be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero. 2183be9b7335SNicolas Pitre * 2184be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 
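 *
 * Sketch of an early-boot caller (illustrative; EARLY_FIXMAP_VADDR and
 * the 2M size are made up for this example):
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.addr  = (void *)EARLY_FIXMAP_VADDR;
 *	early_vm.size  = SZ_2M;
 *	early_vm.flags = VM_IOREMAP;
 *	vm_area_add_early(&early_vm);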
2185be9b7335SNicolas Pitre */ 2186be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm) 2187be9b7335SNicolas Pitre { 2188be9b7335SNicolas Pitre struct vm_struct *tmp, **p; 2189be9b7335SNicolas Pitre 2190be9b7335SNicolas Pitre BUG_ON(vmap_initialized); 2191be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 2192be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) { 2193be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size); 2194be9b7335SNicolas Pitre break; 2195be9b7335SNicolas Pitre } else 2196be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr); 2197be9b7335SNicolas Pitre } 2198be9b7335SNicolas Pitre vm->next = *p; 2199be9b7335SNicolas Pitre *p = vm; 2200be9b7335SNicolas Pitre } 2201be9b7335SNicolas Pitre 2202be9b7335SNicolas Pitre /** 2203f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot 2204f0aa6617STejun Heo * @vm: vm_struct to register 2205c0c0a293STejun Heo * @align: requested alignment 2206f0aa6617STejun Heo * 2207f0aa6617STejun Heo * This function is used to register kernel vm area before 2208f0aa6617STejun Heo * vmalloc_init() is called. @vm->size and @vm->flags should contain 2209f0aa6617STejun Heo * proper values on entry and other fields should be zero. On return, 2210f0aa6617STejun Heo * vm->addr contains the allocated address. 2211f0aa6617STejun Heo * 2212f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 2213f0aa6617STejun Heo */ 2214c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align) 2215f0aa6617STejun Heo { 2216f0aa6617STejun Heo static size_t vm_init_off __initdata; 2217c0c0a293STejun Heo unsigned long addr; 2218f0aa6617STejun Heo 2219c0c0a293STejun Heo addr = ALIGN(VMALLOC_START + vm_init_off, align); 2220c0c0a293STejun Heo vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; 2221c0c0a293STejun Heo 2222c0c0a293STejun Heo vm->addr = (void *)addr; 2223f0aa6617STejun Heo 2224be9b7335SNicolas Pitre vm_area_add_early(vm); 2225f0aa6617STejun Heo } 2226f0aa6617STejun Heo 222768ad4a33SUladzislau Rezki (Sony) static void vmap_init_free_space(void) 222868ad4a33SUladzislau Rezki (Sony) { 222968ad4a33SUladzislau Rezki (Sony) unsigned long vmap_start = 1; 223068ad4a33SUladzislau Rezki (Sony) const unsigned long vmap_end = ULONG_MAX; 223168ad4a33SUladzislau Rezki (Sony) struct vmap_area *busy, *free; 223268ad4a33SUladzislau Rezki (Sony) 223368ad4a33SUladzislau Rezki (Sony) /* 223468ad4a33SUladzislau Rezki (Sony) * B F B B B F 223568ad4a33SUladzislau Rezki (Sony) * -|-----|.....|-----|-----|-----|.....|- 223668ad4a33SUladzislau Rezki (Sony) * | The KVA space | 223768ad4a33SUladzislau Rezki (Sony) * |<--------------------------------->| 223868ad4a33SUladzislau Rezki (Sony) */ 223968ad4a33SUladzislau Rezki (Sony) list_for_each_entry(busy, &vmap_area_list, list) { 224068ad4a33SUladzislau Rezki (Sony) if (busy->va_start - vmap_start > 0) { 224168ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 224268ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 224368ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 224468ad4a33SUladzislau Rezki (Sony) free->va_end = busy->va_start; 224568ad4a33SUladzislau Rezki (Sony) 224668ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 224768ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 224868ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 224968ad4a33SUladzislau Rezki (Sony) } 225068ad4a33SUladzislau Rezki 
(Sony) } 225168ad4a33SUladzislau Rezki (Sony) 225268ad4a33SUladzislau Rezki (Sony) vmap_start = busy->va_end; 225368ad4a33SUladzislau Rezki (Sony) } 225468ad4a33SUladzislau Rezki (Sony) 225568ad4a33SUladzislau Rezki (Sony) if (vmap_end - vmap_start > 0) { 225668ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 225768ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 225868ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 225968ad4a33SUladzislau Rezki (Sony) free->va_end = vmap_end; 226068ad4a33SUladzislau Rezki (Sony) 226168ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 226268ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 226368ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 226468ad4a33SUladzislau Rezki (Sony) } 226568ad4a33SUladzislau Rezki (Sony) } 226668ad4a33SUladzislau Rezki (Sony) } 226768ad4a33SUladzislau Rezki (Sony) 2268db64fe02SNick Piggin void __init vmalloc_init(void) 2269db64fe02SNick Piggin { 2270822c18f2SIvan Kokshaysky struct vmap_area *va; 2271822c18f2SIvan Kokshaysky struct vm_struct *tmp; 2272db64fe02SNick Piggin int i; 2273db64fe02SNick Piggin 227468ad4a33SUladzislau Rezki (Sony) /* 227568ad4a33SUladzislau Rezki (Sony) * Create the cache for vmap_area objects. 227668ad4a33SUladzislau Rezki (Sony) */ 227768ad4a33SUladzislau Rezki (Sony) vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 227868ad4a33SUladzislau Rezki (Sony) 2279db64fe02SNick Piggin for_each_possible_cpu(i) { 2280db64fe02SNick Piggin struct vmap_block_queue *vbq; 228132fcfd40SAl Viro struct vfree_deferred *p; 2282db64fe02SNick Piggin 2283db64fe02SNick Piggin vbq = &per_cpu(vmap_block_queue, i); 2284db64fe02SNick Piggin spin_lock_init(&vbq->lock); 2285db64fe02SNick Piggin INIT_LIST_HEAD(&vbq->free); 228632fcfd40SAl Viro p = &per_cpu(vfree_deferred, i); 228732fcfd40SAl Viro init_llist_head(&p->list); 228832fcfd40SAl Viro INIT_WORK(&p->wq, free_work); 2289db64fe02SNick Piggin } 22909b463334SJeremy Fitzhardinge 2291822c18f2SIvan Kokshaysky /* Import existing vmlist entries. */ 2292822c18f2SIvan Kokshaysky for (tmp = vmlist; tmp; tmp = tmp->next) { 229368ad4a33SUladzislau Rezki (Sony) va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 229468ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!va)) 229568ad4a33SUladzislau Rezki (Sony) continue; 229668ad4a33SUladzislau Rezki (Sony) 2297822c18f2SIvan Kokshaysky va->va_start = (unsigned long)tmp->addr; 2298822c18f2SIvan Kokshaysky va->va_end = va->va_start + tmp->size; 2299dbda591dSKyongHo va->vm = tmp; 230068ad4a33SUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 2301822c18f2SIvan Kokshaysky } 2302ca23e405STejun Heo 230368ad4a33SUladzislau Rezki (Sony) /* 230468ad4a33SUladzislau Rezki (Sony) * Now we can initialize a free vmap space. 230568ad4a33SUladzislau Rezki (Sony) */ 230668ad4a33SUladzislau Rezki (Sony) vmap_init_free_space(); 23079b463334SJeremy Fitzhardinge vmap_initialized = true; 2308db64fe02SNick Piggin } 2309db64fe02SNick Piggin 23108fc48985STejun Heo /** 23118fc48985STejun Heo * unmap_kernel_range - unmap kernel VM area and flush cache and TLB 23128fc48985STejun Heo * @addr: start of the VM area to unmap 23138fc48985STejun Heo * @size: size of the VM area to unmap 23148fc48985STejun Heo * 23158fc48985STejun Heo * Similar to unmap_kernel_range_noflush() but flushes vcache before 23168fc48985STejun Heo * the unmapping and tlb after. 
23178fc48985STejun Heo */ 2318db64fe02SNick Piggin void unmap_kernel_range(unsigned long addr, unsigned long size) 2319db64fe02SNick Piggin { 2320db64fe02SNick Piggin unsigned long end = addr + size; 2321f6fcba70STejun Heo 2322f6fcba70STejun Heo flush_cache_vunmap(addr, end); 2323b521c43fSChristoph Hellwig unmap_kernel_range_noflush(addr, size); 2324db64fe02SNick Piggin flush_tlb_kernel_range(addr, end); 2325db64fe02SNick Piggin } 2326db64fe02SNick Piggin 2327e36176beSUladzislau Rezki (Sony) static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, 2328e36176beSUladzislau Rezki (Sony) struct vmap_area *va, unsigned long flags, const void *caller) 2329cf88c790STejun Heo { 2330cf88c790STejun Heo vm->flags = flags; 2331cf88c790STejun Heo vm->addr = (void *)va->va_start; 2332cf88c790STejun Heo vm->size = va->va_end - va->va_start; 2333cf88c790STejun Heo vm->caller = caller; 2334db1aecafSMinchan Kim va->vm = vm; 2335e36176beSUladzislau Rezki (Sony) } 2336e36176beSUladzislau Rezki (Sony) 2337e36176beSUladzislau Rezki (Sony) static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 2338e36176beSUladzislau Rezki (Sony) unsigned long flags, const void *caller) 2339e36176beSUladzislau Rezki (Sony) { 2340e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 2341e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vm, va, flags, caller); 2342c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 2343f5252e00SMitsuo Hayasaka } 2344cf88c790STejun Heo 234520fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm) 2346f5252e00SMitsuo Hayasaka { 2347d4033afdSJoonsoo Kim /* 234820fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED, 2349d4033afdSJoonsoo Kim * we should make sure that vm has proper values. 2350d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info(). 
2351d4033afdSJoonsoo Kim */
2352d4033afdSJoonsoo Kim smp_wmb();
235320fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED;
2354cf88c790STejun Heo }
2355cf88c790STejun Heo
2356db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size,
23572dca6999SDavid Miller unsigned long align, unsigned long flags, unsigned long start,
23585e6cafc8SMarek Szyprowski unsigned long end, int node, gfp_t gfp_mask, const void *caller)
2359db64fe02SNick Piggin {
23600006526dSKautuk Consul struct vmap_area *va;
2361db64fe02SNick Piggin struct vm_struct *area;
2362d98c9e83SAndrey Ryabinin unsigned long requested_size = size;
23631da177e4SLinus Torvalds
236452fd24caSGiridhar Pemmasani BUG_ON(in_interrupt());
23651da177e4SLinus Torvalds size = PAGE_ALIGN(size);
236631be8309SOGAWA Hirofumi if (unlikely(!size))
236731be8309SOGAWA Hirofumi return NULL;
23681da177e4SLinus Torvalds
2369252e5c6eSzijun_hu if (flags & VM_IOREMAP)
2370252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size),
2371252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER);
2372252e5c6eSzijun_hu
2373cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
23741da177e4SLinus Torvalds if (unlikely(!area))
23751da177e4SLinus Torvalds return NULL;
23761da177e4SLinus Torvalds
237771394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD))
23781da177e4SLinus Torvalds size += PAGE_SIZE;
23791da177e4SLinus Torvalds
2380db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2381db64fe02SNick Piggin if (IS_ERR(va)) {
2382db64fe02SNick Piggin kfree(area);
2383db64fe02SNick Piggin return NULL;
23841da177e4SLinus Torvalds }
23851da177e4SLinus Torvalds
2386d98c9e83SAndrey Ryabinin kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2387f5252e00SMitsuo Hayasaka
2388d98c9e83SAndrey Ryabinin setup_vmalloc_vm(area, va, flags, caller);
23893c5c3cfbSDaniel Axtens
23901da177e4SLinus Torvalds return area;
23911da177e4SLinus Torvalds }
23921da177e4SLinus Torvalds
2393c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2394c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end,
23955e6cafc8SMarek Szyprowski const void *caller)
2396c2968612SBenjamin Herrenschmidt {
239700ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
239800ef2d2fSDavid Rientjes GFP_KERNEL, caller);
2399c2968612SBenjamin Herrenschmidt }
2400c2968612SBenjamin Herrenschmidt
24011da177e4SLinus Torvalds /**
2402183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area
24031da177e4SLinus Torvalds * @size: size of the area
24041da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
24051da177e4SLinus Torvalds *
24061da177e4SLinus Torvalds * Search an area of @size in the kernel virtual mapping area,
24071da177e4SLinus Torvalds * and reserve it for our purposes.
2409a862f68aSMike Rapoport *
2410a862f68aSMike Rapoport * Return: the area descriptor on success or %NULL on failure.
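 *
 * Sketch of a typical caller (illustrative; the SZ_1M size and error
 * style are assumptions):
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(SZ_1M, VM_IOREMAP);
 *	if (!area)
 *		return -ENOMEM;
 *
 * The range area->addr .. area->addr + area->size is then reserved and
 * can later be released with free_vm_area(area).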
24111da177e4SLinus Torvalds */
24121da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
24131da177e4SLinus Torvalds {
24142dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
241500ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL,
241600ef2d2fSDavid Rientjes __builtin_return_address(0));
241723016969SChristoph Lameter }
241823016969SChristoph Lameter
241923016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
24205e6cafc8SMarek Szyprowski const void *caller)
242123016969SChristoph Lameter {
24222dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
242300ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, caller);
24241da177e4SLinus Torvalds }
24251da177e4SLinus Torvalds
2426e9da6e99SMarek Szyprowski /**
2427e9da6e99SMarek Szyprowski * find_vm_area - find a contiguous kernel virtual area
2428e9da6e99SMarek Szyprowski * @addr: base address
2429e9da6e99SMarek Szyprowski *
2430e9da6e99SMarek Szyprowski * Search for the kernel VM area starting at @addr, and return it.
2431e9da6e99SMarek Szyprowski * It is up to the caller to do all required locking to keep the returned
2432e9da6e99SMarek Szyprowski * pointer valid.
2433a862f68aSMike Rapoport *
243474640617SHui Su * Return: the area descriptor on success or %NULL on failure.
2435e9da6e99SMarek Szyprowski */
2436e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr)
243783342314SNick Piggin {
2438db64fe02SNick Piggin struct vmap_area *va;
243983342314SNick Piggin
2440db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr);
2441688fcbfcSPengfei Li if (!va)
24427856dfebSAndi Kleen return NULL;
2443688fcbfcSPengfei Li
2444688fcbfcSPengfei Li return va->vm;
24457856dfebSAndi Kleen }
24467856dfebSAndi Kleen
24471da177e4SLinus Torvalds /**
2448183ff22bSSimon Arlott * remove_vm_area - find and remove a contiguous kernel virtual area
24491da177e4SLinus Torvalds * @addr: base address
24501da177e4SLinus Torvalds *
24511da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it.
24521da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe
24537856dfebSAndi Kleen * on SMP machines, except for its size or flags.
2454a862f68aSMike Rapoport *
245574640617SHui Su * Return: the area descriptor on success or %NULL on failure.
24561da177e4SLinus Torvalds */ 2457b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 24581da177e4SLinus Torvalds { 2459db64fe02SNick Piggin struct vmap_area *va; 2460db64fe02SNick Piggin 24615803ed29SChristoph Hellwig might_sleep(); 24625803ed29SChristoph Hellwig 2463dd3b8353SUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 2464dd3b8353SUladzislau Rezki (Sony) va = __find_vmap_area((unsigned long)addr); 2465688fcbfcSPengfei Li if (va && va->vm) { 2466db1aecafSMinchan Kim struct vm_struct *vm = va->vm; 2467f5252e00SMitsuo Hayasaka 2468c69480adSJoonsoo Kim va->vm = NULL; 2469c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 2470c69480adSJoonsoo Kim 2471a5af5aa8SAndrey Ryabinin kasan_free_shadow(vm); 2472dd32c279SKAMEZAWA Hiroyuki free_unmap_vmap_area(va); 2473dd32c279SKAMEZAWA Hiroyuki 2474db64fe02SNick Piggin return vm; 2475db64fe02SNick Piggin } 2476dd3b8353SUladzislau Rezki (Sony) 2477dd3b8353SUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 2478db64fe02SNick Piggin return NULL; 24791da177e4SLinus Torvalds } 24801da177e4SLinus Torvalds 2481868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area, 2482868b104dSRick Edgecombe int (*set_direct_map)(struct page *page)) 2483868b104dSRick Edgecombe { 2484868b104dSRick Edgecombe int i; 2485868b104dSRick Edgecombe 2486121e6f32SNicholas Piggin /* HUGE_VMALLOC passes small pages to set_direct_map */ 2487868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) 2488868b104dSRick Edgecombe if (page_address(area->pages[i])) 2489868b104dSRick Edgecombe set_direct_map(area->pages[i]); 2490868b104dSRick Edgecombe } 2491868b104dSRick Edgecombe 2492868b104dSRick Edgecombe /* Handle removing and resetting vm mappings related to the vm_struct. */ 2493868b104dSRick Edgecombe static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 2494868b104dSRick Edgecombe { 2495868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 2496121e6f32SNicholas Piggin unsigned int page_order = vm_area_page_order(area); 2497868b104dSRick Edgecombe int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 249831e67340SRick Edgecombe int flush_dmap = 0; 2499868b104dSRick Edgecombe int i; 2500868b104dSRick Edgecombe 2501868b104dSRick Edgecombe remove_vm_area(area->addr); 2502868b104dSRick Edgecombe 2503868b104dSRick Edgecombe /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */ 2504868b104dSRick Edgecombe if (!flush_reset) 2505868b104dSRick Edgecombe return; 2506868b104dSRick Edgecombe 2507868b104dSRick Edgecombe /* 2508868b104dSRick Edgecombe * If not deallocating pages, just do the flush of the VM area and 2509868b104dSRick Edgecombe * return. 2510868b104dSRick Edgecombe */ 2511868b104dSRick Edgecombe if (!deallocate_pages) { 2512868b104dSRick Edgecombe vm_unmap_aliases(); 2513868b104dSRick Edgecombe return; 2514868b104dSRick Edgecombe } 2515868b104dSRick Edgecombe 2516868b104dSRick Edgecombe /* 2517868b104dSRick Edgecombe * If execution gets here, flush the vm mapping and reset the direct 2518868b104dSRick Edgecombe * map. Find the start and end range of the direct mappings to make sure 2519868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map. 
2520868b104dSRick Edgecombe */ 2521121e6f32SNicholas Piggin for (i = 0; i < area->nr_pages; i += 1U << page_order) { 25228e41f872SRick Edgecombe unsigned long addr = (unsigned long)page_address(area->pages[i]); 25238e41f872SRick Edgecombe if (addr) { 2524121e6f32SNicholas Piggin unsigned long page_size; 2525121e6f32SNicholas Piggin 2526121e6f32SNicholas Piggin page_size = PAGE_SIZE << page_order; 2527868b104dSRick Edgecombe start = min(addr, start); 2528121e6f32SNicholas Piggin end = max(addr + page_size, end); 252931e67340SRick Edgecombe flush_dmap = 1; 2530868b104dSRick Edgecombe } 2531868b104dSRick Edgecombe } 2532868b104dSRick Edgecombe 2533868b104dSRick Edgecombe /* 2534868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if 2535868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and 2536868b104dSRick Edgecombe * reset the direct map permissions to the default. 2537868b104dSRick Edgecombe */ 2538868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush); 253931e67340SRick Edgecombe _vm_unmap_aliases(start, end, flush_dmap); 2540868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush); 2541868b104dSRick Edgecombe } 2542868b104dSRick Edgecombe 2543b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 25441da177e4SLinus Torvalds { 25451da177e4SLinus Torvalds struct vm_struct *area; 25461da177e4SLinus Torvalds 25471da177e4SLinus Torvalds if (!addr) 25481da177e4SLinus Torvalds return; 25491da177e4SLinus Torvalds 2550e69e9d4aSHATAYAMA Daisuke if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 2551ab15d9b4SDan Carpenter addr)) 25521da177e4SLinus Torvalds return; 25531da177e4SLinus Torvalds 25546ade2032SLiviu Dudau area = find_vm_area(addr); 25551da177e4SLinus Torvalds if (unlikely(!area)) { 25564c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 25571da177e4SLinus Torvalds addr); 25581da177e4SLinus Torvalds return; 25591da177e4SLinus Torvalds } 25601da177e4SLinus Torvalds 256105e3ff95SChintan Pandya debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); 256205e3ff95SChintan Pandya debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); 25639a11b49aSIngo Molnar 2564c041098cSVincenzo Frascino kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); 25653c5c3cfbSDaniel Axtens 2566868b104dSRick Edgecombe vm_remove_mappings(area, deallocate_pages); 2567868b104dSRick Edgecombe 25681da177e4SLinus Torvalds if (deallocate_pages) { 2569121e6f32SNicholas Piggin unsigned int page_order = vm_area_page_order(area); 25701da177e4SLinus Torvalds int i; 25711da177e4SLinus Torvalds 2572121e6f32SNicholas Piggin for (i = 0; i < area->nr_pages; i += 1U << page_order) { 2573bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 2574bf53d6f8SChristoph Lameter 2575bf53d6f8SChristoph Lameter BUG_ON(!page); 2576121e6f32SNicholas Piggin __free_pages(page, page_order); 25771da177e4SLinus Torvalds } 257897105f0aSRoman Gushchin atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); 25791da177e4SLinus Torvalds 2580244d63eeSDavid Rientjes kvfree(area->pages); 25811da177e4SLinus Torvalds } 25821da177e4SLinus Torvalds 25831da177e4SLinus Torvalds kfree(area); 25841da177e4SLinus Torvalds } 25851da177e4SLinus Torvalds 2586bf22e37aSAndrey Ryabinin static inline void __vfree_deferred(const void *addr) 2587bf22e37aSAndrey Ryabinin { 2588bf22e37aSAndrey Ryabinin /* 2589bf22e37aSAndrey Ryabinin * Use 
raw_cpu_ptr() because this can be called from preemptible
2590bf22e37aSAndrey Ryabinin * context. Preemption is absolutely fine here, because the llist_add()
2591bf22e37aSAndrey Ryabinin * implementation is lockless, so it works even if we are adding to
259273221d88SJeongtae Park * another cpu's list. schedule_work() should be fine with this too.
2593bf22e37aSAndrey Ryabinin */
2594bf22e37aSAndrey Ryabinin struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2595bf22e37aSAndrey Ryabinin
2596bf22e37aSAndrey Ryabinin if (llist_add((struct llist_node *)addr, &p->list))
2597bf22e37aSAndrey Ryabinin schedule_work(&p->wq);
2598bf22e37aSAndrey Ryabinin }
2599bf22e37aSAndrey Ryabinin
2600bf22e37aSAndrey Ryabinin /**
2601bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc()
2602bf22e37aSAndrey Ryabinin * @addr: memory base address
2603bf22e37aSAndrey Ryabinin *
2604bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context
2605bf22e37aSAndrey Ryabinin * except NMIs.
2606bf22e37aSAndrey Ryabinin */
2607bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr)
2608bf22e37aSAndrey Ryabinin {
2609bf22e37aSAndrey Ryabinin BUG_ON(in_nmi());
2610bf22e37aSAndrey Ryabinin
2611bf22e37aSAndrey Ryabinin kmemleak_free(addr);
2612bf22e37aSAndrey Ryabinin
2613bf22e37aSAndrey Ryabinin if (!addr)
2614bf22e37aSAndrey Ryabinin return;
2615bf22e37aSAndrey Ryabinin __vfree_deferred(addr);
2616bf22e37aSAndrey Ryabinin }
2617bf22e37aSAndrey Ryabinin
2618c67dc624SRoman Penyaev static void __vfree(const void *addr)
2619c67dc624SRoman Penyaev {
2620c67dc624SRoman Penyaev if (unlikely(in_interrupt()))
2621c67dc624SRoman Penyaev __vfree_deferred(addr);
2622c67dc624SRoman Penyaev else
2623c67dc624SRoman Penyaev __vunmap(addr, 1);
2624c67dc624SRoman Penyaev }
2625c67dc624SRoman Penyaev
26261da177e4SLinus Torvalds /**
2627fa307474SMatthew Wilcox (Oracle) * vfree - Release memory allocated by vmalloc()
2628fa307474SMatthew Wilcox (Oracle) * @addr: Memory base address
26291da177e4SLinus Torvalds *
2630fa307474SMatthew Wilcox (Oracle) * Free the virtually contiguous memory area starting at @addr, as obtained
2631fa307474SMatthew Wilcox (Oracle) * from one of the vmalloc() family of APIs. This will usually also free the
2632fa307474SMatthew Wilcox (Oracle) * physical memory underlying the virtual allocation, but that memory is
2633fa307474SMatthew Wilcox (Oracle) * reference counted, so it will not be freed until the last user goes away.
26341da177e4SLinus Torvalds *
2635fa307474SMatthew Wilcox (Oracle) * If @addr is NULL, no operation is performed.
263632fcfd40SAl Viro *
2637fa307474SMatthew Wilcox (Oracle) * Context:
26383ca4ea3aSAndrey Ryabinin * May sleep if called *not* from interrupt context.
2639fa307474SMatthew Wilcox (Oracle) * Must not be called in NMI context (strictly speaking, it could be
2640fa307474SMatthew Wilcox (Oracle) * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2641fa307474SMatthew Wilcox (Oracle) * conventions for vfree() arch-dependent would be a really bad idea).
26421da177e4SLinus Torvalds */
2643b3bdda02SChristoph Lameter void vfree(const void *addr)
26441da177e4SLinus Torvalds {
264532fcfd40SAl Viro BUG_ON(in_nmi());
264689219d37SCatalin Marinas
264789219d37SCatalin Marinas kmemleak_free(addr);
264889219d37SCatalin Marinas
2649a8dda165SAndrey Ryabinin might_sleep_if(!in_interrupt());
2650a8dda165SAndrey Ryabinin
265132fcfd40SAl Viro if (!addr)
265232fcfd40SAl Viro return;
2653c67dc624SRoman Penyaev
2654c67dc624SRoman Penyaev __vfree(addr);
26551da177e4SLinus Torvalds }
26561da177e4SLinus Torvalds EXPORT_SYMBOL(vfree);
26571da177e4SLinus Torvalds
26581da177e4SLinus Torvalds /**
26591da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap()
26601da177e4SLinus Torvalds * @addr: memory base address
26611da177e4SLinus Torvalds *
26621da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr,
26631da177e4SLinus Torvalds * which was created from the page array passed to vmap().
26641da177e4SLinus Torvalds *
266580e93effSPekka Enberg * Must not be called in interrupt context.
26661da177e4SLinus Torvalds */
2667b3bdda02SChristoph Lameter void vunmap(const void *addr)
26681da177e4SLinus Torvalds {
26691da177e4SLinus Torvalds BUG_ON(in_interrupt());
267034754b69SPeter Zijlstra might_sleep();
267132fcfd40SAl Viro if (addr)
26721da177e4SLinus Torvalds __vunmap(addr, 0);
26731da177e4SLinus Torvalds }
26741da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap);
26751da177e4SLinus Torvalds
26761da177e4SLinus Torvalds /**
26771da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space
26781da177e4SLinus Torvalds * @pages: array of page pointers
26791da177e4SLinus Torvalds * @count: number of pages to map
26801da177e4SLinus Torvalds * @flags: vm_area->flags
26811da177e4SLinus Torvalds * @prot: page protection for the mapping
26821da177e4SLinus Torvalds *
2683b944afc9SChristoph Hellwig * Maps @count pages from @pages into contiguous kernel virtual space.
2684b944afc9SChristoph Hellwig * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2685b944afc9SChristoph Hellwig * (which must be kmalloc or vmalloc memory) and one reference per page in it
2686b944afc9SChristoph Hellwig * are transferred from the caller to vmap(), and will be freed / dropped when
2687b944afc9SChristoph Hellwig * vfree() is called on the return value.
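 *
 * Minimal sketch (illustrative; assumes the caller already holds
 * references on the two pages):
 *
 *	struct page *pages[2] = { page0, page1 };
 *	void *va;
 *
 *	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);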
2688a862f68aSMike Rapoport * 2689a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 26901da177e4SLinus Torvalds */ 26911da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 26921da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 26931da177e4SLinus Torvalds { 26941da177e4SLinus Torvalds struct vm_struct *area; 2695*b67177ecSNicholas Piggin unsigned long addr; 269665ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */ 26971da177e4SLinus Torvalds 269834754b69SPeter Zijlstra might_sleep(); 269934754b69SPeter Zijlstra 2700ca79b0c2SArun KS if (count > totalram_pages()) 27011da177e4SLinus Torvalds return NULL; 27021da177e4SLinus Torvalds 270365ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT; 270465ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 27051da177e4SLinus Torvalds if (!area) 27061da177e4SLinus Torvalds return NULL; 270723016969SChristoph Lameter 2708*b67177ecSNicholas Piggin addr = (unsigned long)area->addr; 2709*b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), 2710*b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) { 27111da177e4SLinus Torvalds vunmap(area->addr); 27121da177e4SLinus Torvalds return NULL; 27131da177e4SLinus Torvalds } 27141da177e4SLinus Torvalds 2715c22ee528SMiaohe Lin if (flags & VM_MAP_PUT_PAGES) { 2716b944afc9SChristoph Hellwig area->pages = pages; 2717c22ee528SMiaohe Lin area->nr_pages = count; 2718c22ee528SMiaohe Lin } 27191da177e4SLinus Torvalds return area->addr; 27201da177e4SLinus Torvalds } 27211da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 27221da177e4SLinus Torvalds 27233e9a9e25SChristoph Hellwig #ifdef CONFIG_VMAP_PFN 27243e9a9e25SChristoph Hellwig struct vmap_pfn_data { 27253e9a9e25SChristoph Hellwig unsigned long *pfns; 27263e9a9e25SChristoph Hellwig pgprot_t prot; 27273e9a9e25SChristoph Hellwig unsigned int idx; 27283e9a9e25SChristoph Hellwig }; 27293e9a9e25SChristoph Hellwig 27303e9a9e25SChristoph Hellwig static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) 27313e9a9e25SChristoph Hellwig { 27323e9a9e25SChristoph Hellwig struct vmap_pfn_data *data = private; 27333e9a9e25SChristoph Hellwig 27343e9a9e25SChristoph Hellwig if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx]))) 27353e9a9e25SChristoph Hellwig return -EINVAL; 27363e9a9e25SChristoph Hellwig *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot)); 27373e9a9e25SChristoph Hellwig return 0; 27383e9a9e25SChristoph Hellwig } 27393e9a9e25SChristoph Hellwig 27403e9a9e25SChristoph Hellwig /** 27413e9a9e25SChristoph Hellwig * vmap_pfn - map an array of PFNs into virtually contiguous space 27423e9a9e25SChristoph Hellwig * @pfns: array of PFNs 27433e9a9e25SChristoph Hellwig * @count: number of pages to map 27443e9a9e25SChristoph Hellwig * @prot: page protection for the mapping 27453e9a9e25SChristoph Hellwig * 27463e9a9e25SChristoph Hellwig * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns 27473e9a9e25SChristoph Hellwig * the start address of the mapping. 
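 *
 * Sketch (illustrative): mapping two device PFNs that have no struct
 * page backing; dev_pfn0/dev_pfn1 and the write-combine protection are
 * assumptions:
 *
 *	unsigned long pfns[2] = { dev_pfn0, dev_pfn1 };
 *	void *va;
 *
 *	va = vmap_pfn(pfns, 2, pgprot_writecombine(PAGE_KERNEL));
 *	if (!va)
 *		return -ENOMEM;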
27483e9a9e25SChristoph Hellwig */ 27493e9a9e25SChristoph Hellwig void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) 27503e9a9e25SChristoph Hellwig { 27513e9a9e25SChristoph Hellwig struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; 27523e9a9e25SChristoph Hellwig struct vm_struct *area; 27533e9a9e25SChristoph Hellwig 27543e9a9e25SChristoph Hellwig area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, 27553e9a9e25SChristoph Hellwig __builtin_return_address(0)); 27563e9a9e25SChristoph Hellwig if (!area) 27573e9a9e25SChristoph Hellwig return NULL; 27583e9a9e25SChristoph Hellwig if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 27593e9a9e25SChristoph Hellwig count * PAGE_SIZE, vmap_pfn_apply, &data)) { 27603e9a9e25SChristoph Hellwig free_vm_area(area); 27613e9a9e25SChristoph Hellwig return NULL; 27623e9a9e25SChristoph Hellwig } 27633e9a9e25SChristoph Hellwig return area->addr; 27643e9a9e25SChristoph Hellwig } 27653e9a9e25SChristoph Hellwig EXPORT_SYMBOL_GPL(vmap_pfn); 27663e9a9e25SChristoph Hellwig #endif /* CONFIG_VMAP_PFN */ 27673e9a9e25SChristoph Hellwig 2768e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 2769121e6f32SNicholas Piggin pgprot_t prot, unsigned int page_shift, 2770121e6f32SNicholas Piggin int node) 27711da177e4SLinus Torvalds { 2772930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 2773121e6f32SNicholas Piggin unsigned long addr = (unsigned long)area->addr; 2774121e6f32SNicholas Piggin unsigned long size = get_vm_area_size(area); 277534fe6537SAndrew Morton unsigned long array_size; 2776121e6f32SNicholas Piggin unsigned int nr_small_pages = size >> PAGE_SHIFT; 2777121e6f32SNicholas Piggin unsigned int page_order; 2778f255935bSChristoph Hellwig struct page **pages; 2779121e6f32SNicholas Piggin unsigned int i; 27801da177e4SLinus Torvalds 2781121e6f32SNicholas Piggin array_size = (unsigned long)nr_small_pages * sizeof(struct page *); 2782f255935bSChristoph Hellwig gfp_mask |= __GFP_NOWARN; 2783f255935bSChristoph Hellwig if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) 2784f255935bSChristoph Hellwig gfp_mask |= __GFP_HIGHMEM; 27851da177e4SLinus Torvalds 27861da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. */ 27878757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 2788f255935bSChristoph Hellwig pages = __vmalloc_node(array_size, 1, nested_gfp, node, 2789f255935bSChristoph Hellwig area->caller); 2790286e1ea3SAndrew Morton } else { 2791976d6dfbSJan Beulich pages = kmalloc_node(array_size, nested_gfp, node); 2792286e1ea3SAndrew Morton } 27937ea36242SAustin Kim 27947ea36242SAustin Kim if (!pages) { 27958945a723SUladzislau Rezki (Sony) free_vm_area(area); 27961da177e4SLinus Torvalds return NULL; 27971da177e4SLinus Torvalds } 27981da177e4SLinus Torvalds 27997ea36242SAustin Kim area->pages = pages; 2800121e6f32SNicholas Piggin area->nr_pages = nr_small_pages; 2801121e6f32SNicholas Piggin set_vm_area_page_order(area, page_shift - PAGE_SHIFT); 28027ea36242SAustin Kim 2803121e6f32SNicholas Piggin page_order = vm_area_page_order(area); 2804121e6f32SNicholas Piggin 2805121e6f32SNicholas Piggin /* 2806121e6f32SNicholas Piggin * Careful, we allocate and map page_order pages, but tracking is done 2807121e6f32SNicholas Piggin * per PAGE_SIZE page so as to keep the vm_struct APIs independent of 2808121e6f32SNicholas Piggin * the physical/mapped size. 
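 *
 * For example (illustrative numbers): with nr_pages == 8 and
 * page_order == 2, the loop below makes two alloc_pages_node() calls,
 * each returning a compound page of four small pages, yet
 * area->pages[] ends up holding all eight constituent struct page
 * pointers and area->nr_pages stays 8.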
2809121e6f32SNicholas Piggin */ 2810121e6f32SNicholas Piggin for (i = 0; i < area->nr_pages; i += 1U << page_order) { 2811bf53d6f8SChristoph Lameter struct page *page; 2812121e6f32SNicholas Piggin int p; 2813bf53d6f8SChristoph Lameter 2814121e6f32SNicholas Piggin /* Compound pages required for remap_vmalloc_page */ 2815121e6f32SNicholas Piggin page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order); 2816bf53d6f8SChristoph Lameter if (unlikely(!page)) { 281782afbc32SHui Su /* Successfully allocated i pages, free them in __vfree() */ 28181da177e4SLinus Torvalds area->nr_pages = i; 281997105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 28201da177e4SLinus Torvalds goto fail; 28211da177e4SLinus Torvalds } 2822121e6f32SNicholas Piggin 2823121e6f32SNicholas Piggin for (p = 0; p < (1U << page_order); p++) 2824121e6f32SNicholas Piggin area->pages[i + p] = page + p; 2825121e6f32SNicholas Piggin 2826dcf61ff0SLiu Xiang if (gfpflags_allow_blocking(gfp_mask)) 2827660654f9SEric Dumazet cond_resched(); 28281da177e4SLinus Torvalds } 282997105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 28301da177e4SLinus Torvalds 2831121e6f32SNicholas Piggin if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0) 28321da177e4SLinus Torvalds goto fail; 2833ed1f324cSChristoph Hellwig 28341da177e4SLinus Torvalds return area->addr; 28351da177e4SLinus Torvalds 28361da177e4SLinus Torvalds fail: 2837a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 28387877cdccSMichal Hocko "vmalloc: allocation failure, allocated %ld of %ld bytes", 2839121e6f32SNicholas Piggin (area->nr_pages*PAGE_SIZE), size); 2840c67dc624SRoman Penyaev __vfree(area->addr); 28411da177e4SLinus Torvalds return NULL; 28421da177e4SLinus Torvalds } 28431da177e4SLinus Torvalds 2844d0a21265SDavid Rientjes /** 2845d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 2846d0a21265SDavid Rientjes * @size: allocation size 2847d0a21265SDavid Rientjes * @align: desired alignment 2848d0a21265SDavid Rientjes * @start: vm area range start 2849d0a21265SDavid Rientjes * @end: vm area range end 2850d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 2851d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 2852cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 285300ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 2854d0a21265SDavid Rientjes * @caller: caller's return address 2855d0a21265SDavid Rientjes * 2856d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 2857d0a21265SDavid Rientjes * allocator with @gfp_mask flags. Map them into contiguous 2858d0a21265SDavid Rientjes * kernel virtual space, using a pagetable protection of @prot. 
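 *
 * Sketch of a specialized caller (illustrative, in the spirit of a
 * module_alloc() implementation; the range macros and executable
 * protection are arch-defined):
 *
 *	p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
 *				 GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
 *				 NUMA_NO_NODE, __builtin_return_address(0));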
2859a862f68aSMike Rapoport * 2860a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 2861d0a21265SDavid Rientjes */ 2862d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 2863d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 2864cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node, 2865cb9e3c29SAndrey Ryabinin const void *caller) 2866930fc45aSChristoph Lameter { 2867d0a21265SDavid Rientjes struct vm_struct *area; 2868d0a21265SDavid Rientjes void *addr; 2869d0a21265SDavid Rientjes unsigned long real_size = size; 2870121e6f32SNicholas Piggin unsigned long real_align = align; 2871121e6f32SNicholas Piggin unsigned int shift = PAGE_SHIFT; 2872d0a21265SDavid Rientjes 2873121e6f32SNicholas Piggin if (!size || (size >> PAGE_SHIFT) > totalram_pages()) { 2874121e6f32SNicholas Piggin area = NULL; 2875de7d2b56SJoe Perches goto fail; 2876121e6f32SNicholas Piggin } 2877d0a21265SDavid Rientjes 2878121e6f32SNicholas Piggin if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) && 2879121e6f32SNicholas Piggin arch_vmap_pmd_supported(prot)) { 2880121e6f32SNicholas Piggin unsigned long size_per_node; 2881121e6f32SNicholas Piggin 2882121e6f32SNicholas Piggin /* 2883121e6f32SNicholas Piggin * Try huge pages. Only try for PAGE_KERNEL allocations, 2884121e6f32SNicholas Piggin * others like modules don't yet expect huge pages in 2885121e6f32SNicholas Piggin * their allocations due to apply_to_page_range not 2886121e6f32SNicholas Piggin * supporting them. 2887121e6f32SNicholas Piggin */ 2888121e6f32SNicholas Piggin 2889121e6f32SNicholas Piggin size_per_node = size; 2890121e6f32SNicholas Piggin if (node == NUMA_NO_NODE) 2891121e6f32SNicholas Piggin size_per_node /= num_online_nodes(); 2892121e6f32SNicholas Piggin if (size_per_node >= PMD_SIZE) { 2893121e6f32SNicholas Piggin shift = PMD_SHIFT; 2894121e6f32SNicholas Piggin align = max(real_align, 1UL << shift); 2895121e6f32SNicholas Piggin size = ALIGN(real_size, 1UL << shift); 2896121e6f32SNicholas Piggin } 2897121e6f32SNicholas Piggin } 2898121e6f32SNicholas Piggin 2899121e6f32SNicholas Piggin again: 2900121e6f32SNicholas Piggin size = PAGE_ALIGN(size); 2901121e6f32SNicholas Piggin area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | 2902cb9e3c29SAndrey Ryabinin vm_flags, start, end, node, gfp_mask, caller); 2903d0a21265SDavid Rientjes if (!area) 2904de7d2b56SJoe Perches goto fail; 2905d0a21265SDavid Rientjes 2906121e6f32SNicholas Piggin addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node); 29071368edf0SMel Gorman if (!addr) 2908121e6f32SNicholas Piggin goto fail; 290989219d37SCatalin Marinas 291089219d37SCatalin Marinas /* 291120fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED 291220fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized. 29134341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here. 
2914f5252e00SMitsuo Hayasaka */ 291520fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area); 2916f5252e00SMitsuo Hayasaka 291794f4a161SCatalin Marinas kmemleak_vmalloc(area, size, gfp_mask); 291889219d37SCatalin Marinas 291989219d37SCatalin Marinas return addr; 2920de7d2b56SJoe Perches 2921de7d2b56SJoe Perches fail: 2922121e6f32SNicholas Piggin if (shift > PAGE_SHIFT) { 2923121e6f32SNicholas Piggin shift = PAGE_SHIFT; 2924121e6f32SNicholas Piggin align = real_align; 2925121e6f32SNicholas Piggin size = real_size; 2926121e6f32SNicholas Piggin goto again; 2927121e6f32SNicholas Piggin } 2928121e6f32SNicholas Piggin 2929121e6f32SNicholas Piggin if (!area) { 2930121e6f32SNicholas Piggin /* Warn for area allocation, page allocations already warn */ 2931a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 29327877cdccSMichal Hocko "vmalloc: allocation failure: %lu bytes", real_size); 2933121e6f32SNicholas Piggin } 2934de7d2b56SJoe Perches return NULL; 2935930fc45aSChristoph Lameter } 2936930fc45aSChristoph Lameter 29371da177e4SLinus Torvalds /** 2938930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 29391da177e4SLinus Torvalds * @size: allocation size 29402dca6999SDavid Miller * @align: desired alignment 29411da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 294200ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 2943c85d194bSRandy Dunlap * @caller: caller's return address 29441da177e4SLinus Torvalds * 2945f38fcb9cSChristoph Hellwig * Allocate enough pages to cover @size from the page level allocator with 2946f38fcb9cSChristoph Hellwig * @gfp_mask flags. Map them into contiguous kernel virtual space. 2947a7c3e901SMichal Hocko * 2948dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 2949a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported 2950a7c3e901SMichal Hocko * 2951a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be consulted 2952a7c3e901SMichal Hocko * with mm people. 2953a862f68aSMike Rapoport * 2954a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 29551da177e4SLinus Torvalds */ 29562b905948SChristoph Hellwig void *__vmalloc_node(unsigned long size, unsigned long align, 2957f38fcb9cSChristoph Hellwig gfp_t gfp_mask, int node, const void *caller) 29581da177e4SLinus Torvalds { 2959d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 2960f38fcb9cSChristoph Hellwig gfp_mask, PAGE_KERNEL, 0, node, caller); 29611da177e4SLinus Torvalds } 2962c3f896dcSChristoph Hellwig /* 2963c3f896dcSChristoph Hellwig * This is only for performance analysis of vmalloc and stress purpose. 2964c3f896dcSChristoph Hellwig * It is required by vmalloc test module, therefore do not use it other 2965c3f896dcSChristoph Hellwig * than that. 
2966c3f896dcSChristoph Hellwig */ 2967c3f896dcSChristoph Hellwig #ifdef CONFIG_TEST_VMALLOC_MODULE 2968c3f896dcSChristoph Hellwig EXPORT_SYMBOL_GPL(__vmalloc_node); 2969c3f896dcSChristoph Hellwig #endif 29701da177e4SLinus Torvalds 297188dca4caSChristoph Hellwig void *__vmalloc(unsigned long size, gfp_t gfp_mask) 2972930fc45aSChristoph Lameter { 2973f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, 297423016969SChristoph Lameter __builtin_return_address(0)); 2975930fc45aSChristoph Lameter } 29761da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 29771da177e4SLinus Torvalds 29781da177e4SLinus Torvalds /** 29791da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 29801da177e4SLinus Torvalds * @size: allocation size 298192eac168SMike Rapoport * 29821da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 29831da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 29841da177e4SLinus Torvalds * 2985c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 29861da177e4SLinus Torvalds * use __vmalloc() instead. 2987a862f68aSMike Rapoport * 2988a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 29891da177e4SLinus Torvalds */ 29901da177e4SLinus Torvalds void *vmalloc(unsigned long size) 29911da177e4SLinus Torvalds { 29924d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE, 29934d39d728SChristoph Hellwig __builtin_return_address(0)); 29941da177e4SLinus Torvalds } 29951da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 29961da177e4SLinus Torvalds 2997930fc45aSChristoph Lameter /** 2998e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 2999e1ca7788SDave Young * @size: allocation size 300092eac168SMike Rapoport * 3001e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 3002e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 3003e1ca7788SDave Young * The memory allocated is set to zero. 3004e1ca7788SDave Young * 3005e1ca7788SDave Young * For tight control over page level allocator and protection flags 3006e1ca7788SDave Young * use __vmalloc() instead. 3007a862f68aSMike Rapoport * 3008a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3009e1ca7788SDave Young */ 3010e1ca7788SDave Young void *vzalloc(unsigned long size) 3011e1ca7788SDave Young { 30124d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 30134d39d728SChristoph Hellwig __builtin_return_address(0)); 3014e1ca7788SDave Young } 3015e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 3016e1ca7788SDave Young 3017e1ca7788SDave Young /** 3018ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 301983342314SNick Piggin * @size: allocation size 3020ead04089SRolf Eike Beer * 3021ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 3022ead04089SRolf Eike Beer * without leaking data. 
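 *
 * Sketch (illustrative): backing a driver's mmap() handler with zeroed
 * vmalloc memory; vma and the omitted cleanup are assumptions:
 *
 *	buf = vmalloc_user(vma->vm_end - vma->vm_start);
 *	if (!buf)
 *		return -ENOMEM;
 *	return remap_vmalloc_range(vma, buf, 0);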
3023a862f68aSMike Rapoport * 3024a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 302583342314SNick Piggin */ 302683342314SNick Piggin void *vmalloc_user(unsigned long size) 302783342314SNick Piggin { 3028bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3029bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 3030bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 303100ef2d2fSDavid Rientjes __builtin_return_address(0)); 303283342314SNick Piggin } 303383342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 303483342314SNick Piggin 303583342314SNick Piggin /** 3036930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 3037930fc45aSChristoph Lameter * @size: allocation size 3038d44e0780SRandy Dunlap * @node: numa node 3039930fc45aSChristoph Lameter * 3040930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 3041930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 3042930fc45aSChristoph Lameter * 3043c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 3044930fc45aSChristoph Lameter * use __vmalloc() instead. 3045a862f68aSMike Rapoport * 3046a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3047930fc45aSChristoph Lameter */ 3048930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 3049930fc45aSChristoph Lameter { 3050f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, node, 3051f38fcb9cSChristoph Hellwig __builtin_return_address(0)); 3052930fc45aSChristoph Lameter } 3053930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 3054930fc45aSChristoph Lameter 3055e1ca7788SDave Young /** 3056e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 3057e1ca7788SDave Young * @size: allocation size 3058e1ca7788SDave Young * @node: numa node 3059e1ca7788SDave Young * 3060e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 3061e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 3062e1ca7788SDave Young * The memory allocated is set to zero. 3063e1ca7788SDave Young * 3064a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3065e1ca7788SDave Young */ 3066e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 3067e1ca7788SDave Young { 30684d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node, 30694d39d728SChristoph Hellwig __builtin_return_address(0)); 3070e1ca7788SDave Young } 3071e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 3072e1ca7788SDave Young 30730d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 3074698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 30750d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 3076698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 30770d08e0d3SAndi Kleen #else 3078698d0831SMichal Hocko /* 3079698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others 3080698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone. 
3081698d0831SMichal Hocko */
3082698d0831SMichal Hocko #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
30830d08e0d3SAndi Kleen #endif
30840d08e0d3SAndi Kleen
30851da177e4SLinus Torvalds /**
30861da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
30871da177e4SLinus Torvalds * @size: allocation size
30881da177e4SLinus Torvalds *
30891da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the
30901da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space.
3091a862f68aSMike Rapoport *
3092a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
30931da177e4SLinus Torvalds */
30941da177e4SLinus Torvalds void *vmalloc_32(unsigned long size)
30951da177e4SLinus Torvalds {
3096f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3097f38fcb9cSChristoph Hellwig __builtin_return_address(0));
30981da177e4SLinus Torvalds }
30991da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32);
31001da177e4SLinus Torvalds
310183342314SNick Piggin /**
3102ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
310383342314SNick Piggin * @size: allocation size
3104ead04089SRolf Eike Beer *
3105ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be
3106ead04089SRolf Eike Beer * mapped to userspace without leaking data.
3107a862f68aSMike Rapoport *
3108a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
310983342314SNick Piggin */
311083342314SNick Piggin void *vmalloc_32_user(unsigned long size)
311183342314SNick Piggin {
3112bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3113bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3114bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE,
31155a82ac71SRoman Penyaev __builtin_return_address(0));
311683342314SNick Piggin }
311783342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user);
311883342314SNick Piggin
3119d0107eb0SKAMEZAWA Hiroyuki /*
3120d0107eb0SKAMEZAWA Hiroyuki * Small helper routine: copy contents to buf from addr.
3121d0107eb0SKAMEZAWA Hiroyuki * If the page is not present, fill with zeroes.
3122d0107eb0SKAMEZAWA Hiroyuki */
3123d0107eb0SKAMEZAWA Hiroyuki
3124d0107eb0SKAMEZAWA Hiroyuki static int aligned_vread(char *buf, char *addr, unsigned long count)
3125d0107eb0SKAMEZAWA Hiroyuki {
3126d0107eb0SKAMEZAWA Hiroyuki struct page *p;
3127d0107eb0SKAMEZAWA Hiroyuki int copied = 0;
3128d0107eb0SKAMEZAWA Hiroyuki
3129d0107eb0SKAMEZAWA Hiroyuki while (count) {
3130d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length;
3131d0107eb0SKAMEZAWA Hiroyuki
3132891c49abSAlexander Kuleshov offset = offset_in_page(addr);
3133d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset;
3134d0107eb0SKAMEZAWA Hiroyuki if (length > count)
3135d0107eb0SKAMEZAWA Hiroyuki length = count;
3136d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr);
3137d0107eb0SKAMEZAWA Hiroyuki /*
3138d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need a
3139d0107eb0SKAMEZAWA Hiroyuki * lock. But adding a lock here means that we need to add
3140d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calls for this _debug_
3141d0107eb0SKAMEZAWA Hiroyuki * interface, which is rarely used. Instead of that, we'll use
3142d0107eb0SKAMEZAWA Hiroyuki * kmap() and accept a small overhead in this access function.
3143d0107eb0SKAMEZAWA Hiroyuki */
3144d0107eb0SKAMEZAWA Hiroyuki if (p) {
3145d0107eb0SKAMEZAWA Hiroyuki /*
3146d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's
3147d0107eb0SKAMEZAWA Hiroyuki * function description)
3148d0107eb0SKAMEZAWA Hiroyuki */
31499b04c5feSCong Wang void *map = kmap_atomic(p);
3150d0107eb0SKAMEZAWA Hiroyuki memcpy(buf, map + offset, length);
31519b04c5feSCong Wang kunmap_atomic(map);
3152d0107eb0SKAMEZAWA Hiroyuki } else
3153d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, length);
3154d0107eb0SKAMEZAWA Hiroyuki
3155d0107eb0SKAMEZAWA Hiroyuki addr += length;
3156d0107eb0SKAMEZAWA Hiroyuki buf += length;
3157d0107eb0SKAMEZAWA Hiroyuki copied += length;
3158d0107eb0SKAMEZAWA Hiroyuki count -= length;
3159d0107eb0SKAMEZAWA Hiroyuki }
3160d0107eb0SKAMEZAWA Hiroyuki return copied;
3161d0107eb0SKAMEZAWA Hiroyuki }
3162d0107eb0SKAMEZAWA Hiroyuki
3163d0107eb0SKAMEZAWA Hiroyuki static int aligned_vwrite(char *buf, char *addr, unsigned long count)
3164d0107eb0SKAMEZAWA Hiroyuki {
3165d0107eb0SKAMEZAWA Hiroyuki struct page *p;
3166d0107eb0SKAMEZAWA Hiroyuki int copied = 0;
3167d0107eb0SKAMEZAWA Hiroyuki
3168d0107eb0SKAMEZAWA Hiroyuki while (count) {
3169d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length;
3170d0107eb0SKAMEZAWA Hiroyuki
3171891c49abSAlexander Kuleshov offset = offset_in_page(addr);
3172d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset;
3173d0107eb0SKAMEZAWA Hiroyuki if (length > count)
3174d0107eb0SKAMEZAWA Hiroyuki length = count;
3175d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr);
3176d0107eb0SKAMEZAWA Hiroyuki /*
3177d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need a
3178d0107eb0SKAMEZAWA Hiroyuki * lock. But adding a lock here means that we need to add
3179d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calls for this _debug_
3180d0107eb0SKAMEZAWA Hiroyuki * interface, which is rarely used. Instead of that, we'll use
3181d0107eb0SKAMEZAWA Hiroyuki * kmap() and accept a small overhead in this access function.
3182d0107eb0SKAMEZAWA Hiroyuki */
3183d0107eb0SKAMEZAWA Hiroyuki if (p) {
3184d0107eb0SKAMEZAWA Hiroyuki /*
3185d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's
3186d0107eb0SKAMEZAWA Hiroyuki * function description)
3187d0107eb0SKAMEZAWA Hiroyuki */
31889b04c5feSCong Wang void *map = kmap_atomic(p);
3189d0107eb0SKAMEZAWA Hiroyuki memcpy(map + offset, buf, length);
31909b04c5feSCong Wang kunmap_atomic(map);
3191d0107eb0SKAMEZAWA Hiroyuki }
3192d0107eb0SKAMEZAWA Hiroyuki addr += length;
3193d0107eb0SKAMEZAWA Hiroyuki buf += length;
3194d0107eb0SKAMEZAWA Hiroyuki copied += length;
3195d0107eb0SKAMEZAWA Hiroyuki count -= length;
3196d0107eb0SKAMEZAWA Hiroyuki }
3197d0107eb0SKAMEZAWA Hiroyuki return copied;
3198d0107eb0SKAMEZAWA Hiroyuki }
3199d0107eb0SKAMEZAWA Hiroyuki
3200d0107eb0SKAMEZAWA Hiroyuki /**
3201d0107eb0SKAMEZAWA Hiroyuki * vread() - read vmalloc area in a safe way.
3202d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for reading data
3203d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address.
3204d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read.
3205d0107eb0SKAMEZAWA Hiroyuki *
3206d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and
3207d0107eb0SKAMEZAWA Hiroyuki * copies data from that area to a given buffer. If the given memory range
3208d0107eb0SKAMEZAWA Hiroyuki * of [addr...addr+count) includes some valid address, data is copied to
3209d0107eb0SKAMEZAWA Hiroyuki * the proper area of @buf.
3210d0107eb0SKAMEZAWA Hiroyuki  * An IOREMAP area is treated as a memory hole and no copy is done.
3211d0107eb0SKAMEZAWA Hiroyuki  *
3212d0107eb0SKAMEZAWA Hiroyuki  * If [addr...addr+count) doesn't include any intersection with a live
3213a8e5202dSCong Wang  * vm_struct area, it returns 0. @buf should be a kernel buffer.
3214d0107eb0SKAMEZAWA Hiroyuki  *
3215d0107eb0SKAMEZAWA Hiroyuki  * Note: In usual ops, vread() is never necessary because the caller
3216d0107eb0SKAMEZAWA Hiroyuki  * should know the vmalloc() area is valid and can use memcpy().
3217d0107eb0SKAMEZAWA Hiroyuki  * This is for routines which have to access the vmalloc area without
3218d9009d67SGeert Uytterhoeven  * any information, such as /dev/kmem.
3219a862f68aSMike Rapoport  *
3220a862f68aSMike Rapoport  * Return: number of bytes for which addr and buf should be increased
3221a862f68aSMike Rapoport  * (same number as @count) or %0 if [addr...addr+count) doesn't
3222a862f68aSMike Rapoport  * include any intersection with valid vmalloc area
3223d0107eb0SKAMEZAWA Hiroyuki  */
32241da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count)
32251da177e4SLinus Torvalds {
3226e81ce85fSJoonsoo Kim 	struct vmap_area *va;
3227e81ce85fSJoonsoo Kim 	struct vm_struct *vm;
32281da177e4SLinus Torvalds 	char *vaddr, *buf_start = buf;
3229d0107eb0SKAMEZAWA Hiroyuki 	unsigned long buflen = count;
32301da177e4SLinus Torvalds 	unsigned long n;
32311da177e4SLinus Torvalds 
32321da177e4SLinus Torvalds 	/* Don't allow overflow */
32331da177e4SLinus Torvalds 	if ((unsigned long) addr + count < count)
32341da177e4SLinus Torvalds 		count = -(unsigned long) addr;
32351da177e4SLinus Torvalds 
3236e81ce85fSJoonsoo Kim 	spin_lock(&vmap_area_lock);
3237f608788cSSerapheim Dimitropoulos 	va = __find_vmap_area((unsigned long)addr);
3238f608788cSSerapheim Dimitropoulos 	if (!va)
3239f608788cSSerapheim Dimitropoulos 		goto finished;
3240f608788cSSerapheim Dimitropoulos 	list_for_each_entry_from(va, &vmap_area_list, list) {
3241e81ce85fSJoonsoo Kim 		if (!count)
3242e81ce85fSJoonsoo Kim 			break;
3243e81ce85fSJoonsoo Kim 
3244688fcbfcSPengfei Li 		if (!va->vm)
3245e81ce85fSJoonsoo Kim 			continue;
3246e81ce85fSJoonsoo Kim 
3247e81ce85fSJoonsoo Kim 		vm = va->vm;
3248e81ce85fSJoonsoo Kim 		vaddr = (char *) vm->addr;
3249762216abSWanpeng Li 		if (addr >= vaddr + get_vm_area_size(vm))
32501da177e4SLinus Torvalds 			continue;
32511da177e4SLinus Torvalds 		while (addr < vaddr) {
32521da177e4SLinus Torvalds 			if (count == 0)
32531da177e4SLinus Torvalds 				goto finished;
32541da177e4SLinus Torvalds 			*buf = '\0';
32551da177e4SLinus Torvalds 			buf++;
32561da177e4SLinus Torvalds 			addr++;
32571da177e4SLinus Torvalds 			count--;
32581da177e4SLinus Torvalds 		}
3259762216abSWanpeng Li 		n = vaddr + get_vm_area_size(vm) - addr;
3260d0107eb0SKAMEZAWA Hiroyuki 		if (n > count)
3261d0107eb0SKAMEZAWA Hiroyuki 			n = count;
3262e81ce85fSJoonsoo Kim 		if (!(vm->flags & VM_IOREMAP))
3263d0107eb0SKAMEZAWA Hiroyuki 			aligned_vread(buf, addr, n);
3264d0107eb0SKAMEZAWA Hiroyuki 		else /* IOREMAP area is treated as memory hole */
3265d0107eb0SKAMEZAWA Hiroyuki 			memset(buf, 0, n);
3266d0107eb0SKAMEZAWA Hiroyuki 		buf += n;
3267d0107eb0SKAMEZAWA Hiroyuki 		addr += n;
3268d0107eb0SKAMEZAWA Hiroyuki 		count -= n;
32691da177e4SLinus Torvalds 	}
32701da177e4SLinus Torvalds finished:
3271e81ce85fSJoonsoo Kim 	spin_unlock(&vmap_area_lock);
3272d0107eb0SKAMEZAWA Hiroyuki 
3273d0107eb0SKAMEZAWA Hiroyuki 	if (buf == buf_start)
3274d0107eb0SKAMEZAWA Hiroyuki 		return 0;
3275d0107eb0SKAMEZAWA Hiroyuki 	/* zero-fill memory holes */
3276d0107eb0SKAMEZAWA Hiroyuki 	if (buf != buf_start + buflen)
3277d0107eb0SKAMEZAWA Hiroyuki 		memset(buf, 0, buflen - (buf - buf_start));
3278d0107eb0SKAMEZAWA Hiroyuki 
3279d0107eb0SKAMEZAWA Hiroyuki 	return buflen;
32801da177e4SLinus Torvalds }
32811da177e4SLinus Torvalds 
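/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a rarely used debug interface in the spirit of /dev/kmem would drive
 * vread() roughly like this; vwrite() is driven the same way in the
 * opposite direction. kvmalloc()/kvfree() here are just one way to get
 * a kernel-side bounce buffer.
 */
#if 0
static long example_dump_vmalloc(const void *vaddr, unsigned long len)
{
	char *kbuf;
	long nread;

	kbuf = kvmalloc(len, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	/* Copies mapped bytes, zero-fills holes, skips VM_IOREMAP areas. */
	nread = vread(kbuf, (char *)vaddr, len);

	/* ... hand kbuf off, e.g. via copy_to_user(), before freeing ... */
	kvfree(kbuf);
	return nread;
}
#endif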
3282d0107eb0SKAMEZAWA Hiroyuki /**
3283d0107eb0SKAMEZAWA Hiroyuki  * vwrite() - write vmalloc area in a safe way.
3284d0107eb0SKAMEZAWA Hiroyuki  * @buf: buffer for source data
3285d0107eb0SKAMEZAWA Hiroyuki  * @addr: vm address.
3286d0107eb0SKAMEZAWA Hiroyuki  * @count: number of bytes to be written.
3287d0107eb0SKAMEZAWA Hiroyuki  *
3288d0107eb0SKAMEZAWA Hiroyuki  * This function checks that addr is a valid vmalloc'ed area, and
3289d0107eb0SKAMEZAWA Hiroyuki  * copies data from a buffer to the given addr. If the specified range of
3290d0107eb0SKAMEZAWA Hiroyuki  * [addr...addr+count) includes some valid address, data is copied from
3291d0107eb0SKAMEZAWA Hiroyuki  * the proper area of @buf. If there are memory holes, no copy is done
3292d0107eb0SKAMEZAWA Hiroyuki  * into them. An IOREMAP area is treated as a memory hole and no copy is done.
3293d0107eb0SKAMEZAWA Hiroyuki  *
3294d0107eb0SKAMEZAWA Hiroyuki  * If [addr...addr+count) doesn't include any intersection with a live
3295a8e5202dSCong Wang  * vm_struct area, it returns 0. @buf should be a kernel buffer.
3296d0107eb0SKAMEZAWA Hiroyuki  *
3297d0107eb0SKAMEZAWA Hiroyuki  * Note: In usual ops, vwrite() is never necessary because the caller
3298d0107eb0SKAMEZAWA Hiroyuki  * should know the vmalloc() area is valid and can use memcpy().
3299d0107eb0SKAMEZAWA Hiroyuki  * This is for routines which have to access the vmalloc area without
3300d9009d67SGeert Uytterhoeven  * any information, such as /dev/kmem.
3301a862f68aSMike Rapoport  *
3302a862f68aSMike Rapoport  * Return: number of bytes for which addr and buf should be
3303a862f68aSMike Rapoport  * increased (same number as @count) or %0 if [addr...addr+count)
3304a862f68aSMike Rapoport  * doesn't include any intersection with valid vmalloc area
3305d0107eb0SKAMEZAWA Hiroyuki  */
33061da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count)
33071da177e4SLinus Torvalds {
3308e81ce85fSJoonsoo Kim 	struct vmap_area *va;
3309e81ce85fSJoonsoo Kim 	struct vm_struct *vm;
3310d0107eb0SKAMEZAWA Hiroyuki 	char *vaddr;
3311d0107eb0SKAMEZAWA Hiroyuki 	unsigned long n, buflen;
3312d0107eb0SKAMEZAWA Hiroyuki 	int copied = 0;
33131da177e4SLinus Torvalds 
33141da177e4SLinus Torvalds 	/* Don't allow overflow */
33151da177e4SLinus Torvalds 	if ((unsigned long) addr + count < count)
33161da177e4SLinus Torvalds 		count = -(unsigned long) addr;
3317d0107eb0SKAMEZAWA Hiroyuki 	buflen = count;
33181da177e4SLinus Torvalds 
3319e81ce85fSJoonsoo Kim 	spin_lock(&vmap_area_lock);
3320e81ce85fSJoonsoo Kim 	list_for_each_entry(va, &vmap_area_list, list) {
3321e81ce85fSJoonsoo Kim 		if (!count)
3322e81ce85fSJoonsoo Kim 			break;
3323e81ce85fSJoonsoo Kim 
3324688fcbfcSPengfei Li 		if (!va->vm)
3325e81ce85fSJoonsoo Kim 			continue;
3326e81ce85fSJoonsoo Kim 
3327e81ce85fSJoonsoo Kim 		vm = va->vm;
3328e81ce85fSJoonsoo Kim 		vaddr = (char *) vm->addr;
3329762216abSWanpeng Li 		if (addr >= vaddr + get_vm_area_size(vm))
33301da177e4SLinus Torvalds 			continue;
33311da177e4SLinus Torvalds 		while (addr < vaddr) {
33321da177e4SLinus Torvalds 			if (count == 0)
33331da177e4SLinus Torvalds 				goto finished;
33341da177e4SLinus Torvalds 			buf++;
33351da177e4SLinus Torvalds 			addr++;
33361da177e4SLinus Torvalds 			count--;
33371da177e4SLinus Torvalds 		}
3338762216abSWanpeng Li 		n = vaddr + get_vm_area_size(vm) - addr;
3339d0107eb0SKAMEZAWA Hiroyuki 		if (n > count)
3340d0107eb0SKAMEZAWA Hiroyuki 			n = count;
3341e81ce85fSJoonsoo Kim 		if (!(vm->flags & VM_IOREMAP)) {
3342d0107eb0SKAMEZAWA Hiroyuki 			aligned_vwrite(buf, addr, n);
3343d0107eb0SKAMEZAWA Hiroyuki 			copied++;
3344d0107eb0SKAMEZAWA Hiroyuki 		}
3345d0107eb0SKAMEZAWA Hiroyuki 		buf += n;
3346d0107eb0SKAMEZAWA Hiroyuki 		addr += n;
3347d0107eb0SKAMEZAWA Hiroyuki 		count -= n;
33481da177e4SLinus Torvalds 	}
33491da177e4SLinus Torvalds finished:
3350e81ce85fSJoonsoo Kim 	spin_unlock(&vmap_area_lock);
3351d0107eb0SKAMEZAWA Hiroyuki 	if (!copied)
3352d0107eb0SKAMEZAWA Hiroyuki 		return 0;
3353d0107eb0SKAMEZAWA Hiroyuki 	return buflen;
33541da177e4SLinus Torvalds }
335583342314SNick Piggin 
335683342314SNick Piggin /**
3357e69e9d4aSHATAYAMA Daisuke  * remap_vmalloc_range_partial - map vmalloc pages to userspace
3358e69e9d4aSHATAYAMA Daisuke  * @vma: vma to cover
3359e69e9d4aSHATAYAMA Daisuke  * @uaddr: target user address to start at
3360e69e9d4aSHATAYAMA Daisuke  * @kaddr: virtual address of vmalloc kernel memory
3361bdebd6a2SJann Horn  * @pgoff: offset from @kaddr to start at
3362e69e9d4aSHATAYAMA Daisuke  * @size: size of map area
3363e69e9d4aSHATAYAMA Daisuke  *
3364e69e9d4aSHATAYAMA Daisuke  * Returns: 0 for success, -Exxx on failure
3365e69e9d4aSHATAYAMA Daisuke  *
3366e69e9d4aSHATAYAMA Daisuke  * This function checks that @kaddr is a valid vmalloc'ed area,
3367e69e9d4aSHATAYAMA Daisuke  * and that it is big enough to cover the range starting at
3368e69e9d4aSHATAYAMA Daisuke  * @uaddr in @vma. Will return failure if that criterion isn't
3369e69e9d4aSHATAYAMA Daisuke  * met.
3370e69e9d4aSHATAYAMA Daisuke  *
3371e69e9d4aSHATAYAMA Daisuke  * Similar to remap_pfn_range() (see mm/memory.c)
3372e69e9d4aSHATAYAMA Daisuke  */
3373e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3374bdebd6a2SJann Horn 				void *kaddr, unsigned long pgoff,
3375bdebd6a2SJann Horn 				unsigned long size)
3376e69e9d4aSHATAYAMA Daisuke {
3377e69e9d4aSHATAYAMA Daisuke 	struct vm_struct *area;
3378bdebd6a2SJann Horn 	unsigned long off;
3379bdebd6a2SJann Horn 	unsigned long end_index;
3380bdebd6a2SJann Horn 
3381bdebd6a2SJann Horn 	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3382bdebd6a2SJann Horn 		return -EINVAL;
3383e69e9d4aSHATAYAMA Daisuke 
3384e69e9d4aSHATAYAMA Daisuke 	size = PAGE_ALIGN(size);
3385e69e9d4aSHATAYAMA Daisuke 
3386e69e9d4aSHATAYAMA Daisuke 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3387e69e9d4aSHATAYAMA Daisuke 		return -EINVAL;
3388e69e9d4aSHATAYAMA Daisuke 
3389e69e9d4aSHATAYAMA Daisuke 	area = find_vm_area(kaddr);
3390e69e9d4aSHATAYAMA Daisuke 	if (!area)
3391e69e9d4aSHATAYAMA Daisuke 		return -EINVAL;
3392e69e9d4aSHATAYAMA Daisuke 
3393fe9041c2SChristoph Hellwig 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3394e69e9d4aSHATAYAMA Daisuke 		return -EINVAL;
3395e69e9d4aSHATAYAMA Daisuke 
3396bdebd6a2SJann Horn 	if (check_add_overflow(size, off, &end_index) ||
3397bdebd6a2SJann Horn 	    end_index > get_vm_area_size(area))
3398e69e9d4aSHATAYAMA Daisuke 		return -EINVAL;
3399bdebd6a2SJann Horn 	kaddr += off;
3400e69e9d4aSHATAYAMA Daisuke 
3401e69e9d4aSHATAYAMA Daisuke 	do {
3402e69e9d4aSHATAYAMA Daisuke 		struct page *page = vmalloc_to_page(kaddr);
3403e69e9d4aSHATAYAMA Daisuke 		int ret;
3404e69e9d4aSHATAYAMA Daisuke 
3405e69e9d4aSHATAYAMA Daisuke 		ret = vm_insert_page(vma, uaddr, page);
3406e69e9d4aSHATAYAMA Daisuke 		if (ret)
3407e69e9d4aSHATAYAMA Daisuke 			return ret;
3408e69e9d4aSHATAYAMA Daisuke 
3409e69e9d4aSHATAYAMA Daisuke 		uaddr += PAGE_SIZE;
3410e69e9d4aSHATAYAMA Daisuke 		kaddr += PAGE_SIZE;
3411e69e9d4aSHATAYAMA Daisuke 		size -= PAGE_SIZE;
3412e69e9d4aSHATAYAMA Daisuke 	} while (size > 0);
3413e69e9d4aSHATAYAMA Daisuke 
3414e69e9d4aSHATAYAMA Daisuke 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3415e69e9d4aSHATAYAMA Daisuke 
3416e69e9d4aSHATAYAMA Daisuke 	return 0;
3417e69e9d4aSHATAYAMA Daisuke }
3418e69e9d4aSHATAYAMA Daisuke 
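/*
 * A minimal usage sketch (hypothetical driver, not part of this file):
 * the typical consumer of these helpers is an mmap file operation that
 * exposes a buffer obtained from vmalloc_user() (or vmalloc_32_user())
 * to userspace. Names below are illustrative only.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *kbuf = file->private_data;	/* from vmalloc_user() */

	/* Fails with -EINVAL unless the area has VM_USERMAP set. */
	return remap_vmalloc_range(vma, kbuf, 0);
}
#endif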
3419e69e9d4aSHATAYAMA Daisuke /**
342083342314SNick Piggin  * remap_vmalloc_range - map vmalloc pages to userspace
342183342314SNick Piggin  * @vma: vma to cover (map full range of vma)
342283342314SNick Piggin  * @addr: vmalloc memory
342383342314SNick Piggin  * @pgoff: number of pages into addr before first page to map
34247682486bSRandy Dunlap  *
34257682486bSRandy Dunlap  * Returns: 0 for success, -Exxx on failure
342683342314SNick Piggin  *
342783342314SNick Piggin  * This function checks that addr is a valid vmalloc'ed area, and
342883342314SNick Piggin  * that it is big enough to cover the vma. Will return failure if
342983342314SNick Piggin  * that criterion isn't met.
343083342314SNick Piggin  *
343172fd4a35SRobert P. J. Day  * Similar to remap_pfn_range() (see mm/memory.c)
343283342314SNick Piggin  */
343383342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
343483342314SNick Piggin 			unsigned long pgoff)
343583342314SNick Piggin {
3436e69e9d4aSHATAYAMA Daisuke 	return remap_vmalloc_range_partial(vma, vma->vm_start,
3437bdebd6a2SJann Horn 					   addr, pgoff,
3438e69e9d4aSHATAYAMA Daisuke 					   vma->vm_end - vma->vm_start);
343983342314SNick Piggin }
344083342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range);
344183342314SNick Piggin 
34425f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area)
34435f4352fbSJeremy Fitzhardinge {
34445f4352fbSJeremy Fitzhardinge 	struct vm_struct *ret;
34455f4352fbSJeremy Fitzhardinge 	ret = remove_vm_area(area->addr);
34465f4352fbSJeremy Fitzhardinge 	BUG_ON(ret != area);
34475f4352fbSJeremy Fitzhardinge 	kfree(area);
34485f4352fbSJeremy Fitzhardinge }
34495f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area);
3450a10aa579SChristoph Lameter 
34514f8b02b4STejun Heo #ifdef CONFIG_SMP
3452ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n)
3453ca23e405STejun Heo {
34544583e773SGeliang Tang 	return rb_entry_safe(n, struct vmap_area, rb_node);
3455ca23e405STejun Heo }
3456ca23e405STejun Heo 
3457ca23e405STejun Heo /**
345868ad4a33SUladzislau Rezki (Sony)  * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
345968ad4a33SUladzislau Rezki (Sony)  * @addr: target address
3460ca23e405STejun Heo  *
346168ad4a33SUladzislau Rezki (Sony)  * Returns: the vmap_area if it is found. If there is no such area,
346268ad4a33SUladzislau Rezki (Sony)  * the first highest (in reverse order) vmap_area is returned,
346368ad4a33SUladzislau Rezki (Sony)  * i.e. va->va_start < addr && va->va_end < addr, or NULL
346468ad4a33SUladzislau Rezki (Sony)  * if there are no areas before @addr.
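 *
 * For example, given free areas [10, 20) and [30, 40), an @addr of 25
 * yields the [10, 20) area (the closest area below @addr), while an
 * @addr of 5 yields NULL (illustrative numbers, not real addresses).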
3465ca23e405STejun Heo  */
346668ad4a33SUladzislau Rezki (Sony) static struct vmap_area *
346768ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr)
3468ca23e405STejun Heo {
346968ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *va, *tmp;
347068ad4a33SUladzislau Rezki (Sony) 	struct rb_node *n;
347168ad4a33SUladzislau Rezki (Sony) 
347268ad4a33SUladzislau Rezki (Sony) 	n = free_vmap_area_root.rb_node;
347368ad4a33SUladzislau Rezki (Sony) 	va = NULL;
3474ca23e405STejun Heo 
3475ca23e405STejun Heo 	while (n) {
347668ad4a33SUladzislau Rezki (Sony) 		tmp = rb_entry(n, struct vmap_area, rb_node);
347768ad4a33SUladzislau Rezki (Sony) 		if (tmp->va_start <= addr) {
347868ad4a33SUladzislau Rezki (Sony) 			va = tmp;
347968ad4a33SUladzislau Rezki (Sony) 			if (tmp->va_end >= addr)
3480ca23e405STejun Heo 				break;
3481ca23e405STejun Heo 
348268ad4a33SUladzislau Rezki (Sony) 			n = n->rb_right;
3483ca23e405STejun Heo 		} else {
348468ad4a33SUladzislau Rezki (Sony) 			n = n->rb_left;
3485ca23e405STejun Heo 		}
348668ad4a33SUladzislau Rezki (Sony) 	}
348768ad4a33SUladzislau Rezki (Sony) 
348868ad4a33SUladzislau Rezki (Sony) 	return va;
3489ca23e405STejun Heo }
3490ca23e405STejun Heo 
3491ca23e405STejun Heo /**
349268ad4a33SUladzislau Rezki (Sony)  * pvm_determine_end_from_reverse - find the highest aligned address
349368ad4a33SUladzislau Rezki (Sony)  * of a free block below VMALLOC_END
349468ad4a33SUladzislau Rezki (Sony)  * @va:
349568ad4a33SUladzislau Rezki (Sony)  *	in - the VA we start the search (reverse order);
349668ad4a33SUladzislau Rezki (Sony)  *	out - the VA with the highest aligned end address.
3497799fa85dSAlex Shi  * @align: alignment for required highest address
3498ca23e405STejun Heo  *
349968ad4a33SUladzislau Rezki (Sony)  * Returns: determined end address within vmap_area
3500ca23e405STejun Heo  */
350168ad4a33SUladzislau Rezki (Sony) static unsigned long
350268ad4a33SUladzislau Rezki (Sony) pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3503ca23e405STejun Heo {
350468ad4a33SUladzislau Rezki (Sony) 	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3505ca23e405STejun Heo 	unsigned long addr;
350668ad4a33SUladzislau Rezki (Sony) 
350768ad4a33SUladzislau Rezki (Sony) 	if (likely(*va)) {
350868ad4a33SUladzislau Rezki (Sony) 		list_for_each_entry_from_reverse((*va),
350968ad4a33SUladzislau Rezki (Sony) 				&free_vmap_area_list, list) {
351068ad4a33SUladzislau Rezki (Sony) 			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
351168ad4a33SUladzislau Rezki (Sony) 			if ((*va)->va_start < addr)
351268ad4a33SUladzislau Rezki (Sony) 				return addr;
351368ad4a33SUladzislau Rezki (Sony) 		}
3514ca23e405STejun Heo 	}
351568ad4a33SUladzislau Rezki (Sony) 
351668ad4a33SUladzislau Rezki (Sony) 	return 0;
3517ca23e405STejun Heo }
3518ca23e405STejun Heo 
3519ca23e405STejun Heo /**
3520ca23e405STejun Heo  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3521ca23e405STejun Heo  * @offsets: array containing offset of each area
3522ca23e405STejun Heo  * @sizes: array containing size of each area
3523ca23e405STejun Heo  * @nr_vms: the number of areas to allocate
3524ca23e405STejun Heo  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3525ca23e405STejun Heo  *
3526ca23e405STejun Heo  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3527ca23e405STejun Heo  * vm_structs on success, %NULL on failure
3528ca23e405STejun Heo  *
3529ca23e405STejun Heo  * Percpu allocator wants to use congruent vm areas so that it can
3530ca23e405STejun Heo  * maintain the offsets among percpu areas. This function allocates
3531ec3f64fcSDavid Rientjes  * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
3532ec3f64fcSDavid Rientjes  * be scattered pretty far, distance between two areas easily going up
3533ec3f64fcSDavid Rientjes  * to gigabytes. To avoid interacting with regular vmallocs, these
3534ec3f64fcSDavid Rientjes  * areas are allocated from the top.
3535ca23e405STejun Heo  *
3536ca23e405STejun Heo  * Despite its complicated look, this allocator is rather simple. It
353768ad4a33SUladzislau Rezki (Sony)  * does everything top-down and scans free blocks from the end looking
353868ad4a33SUladzislau Rezki (Sony)  * for a matching base. While scanning, if any of the areas do not fit,
353968ad4a33SUladzislau Rezki (Sony)  * the base address is pulled down to fit the area. Scanning is repeated till
354068ad4a33SUladzislau Rezki (Sony)  * all the areas fit and then all necessary data structures are inserted
354168ad4a33SUladzislau Rezki (Sony)  * and the result is returned.
3542ca23e405STejun Heo  */
3543ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3544ca23e405STejun Heo 				     const size_t *sizes, int nr_vms,
3545ec3f64fcSDavid Rientjes 				     size_t align)
3546ca23e405STejun Heo {
3547ca23e405STejun Heo 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3548ca23e405STejun Heo 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
354968ad4a33SUladzislau Rezki (Sony) 	struct vmap_area **vas, *va;
3550ca23e405STejun Heo 	struct vm_struct **vms;
3551ca23e405STejun Heo 	int area, area2, last_area, term_area;
3552253a496dSDaniel Axtens 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
3553ca23e405STejun Heo 	bool purged = false;
355468ad4a33SUladzislau Rezki (Sony) 	enum fit_type type;
3555ca23e405STejun Heo 
3556ca23e405STejun Heo 	/* verify parameters and allocate data structures */
3557891c49abSAlexander Kuleshov 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3558ca23e405STejun Heo 	for (last_area = 0, area = 0; area < nr_vms; area++) {
3559ca23e405STejun Heo 		start = offsets[area];
3560ca23e405STejun Heo 		end = start + sizes[area];
3561ca23e405STejun Heo 
3562ca23e405STejun Heo 		/* is everything aligned properly? 
*/
3563ca23e405STejun Heo 		BUG_ON(!IS_ALIGNED(offsets[area], align));
3564ca23e405STejun Heo 		BUG_ON(!IS_ALIGNED(sizes[area], align));
3565ca23e405STejun Heo 
3566ca23e405STejun Heo 		/* detect the area with the highest address */
3567ca23e405STejun Heo 		if (start > offsets[last_area])
3568ca23e405STejun Heo 			last_area = area;
3569ca23e405STejun Heo 
3570c568da28SWei Yang 		for (area2 = area + 1; area2 < nr_vms; area2++) {
3571ca23e405STejun Heo 			unsigned long start2 = offsets[area2];
3572ca23e405STejun Heo 			unsigned long end2 = start2 + sizes[area2];
3573ca23e405STejun Heo 
3574c568da28SWei Yang 			BUG_ON(start2 < end && start < end2);
3575ca23e405STejun Heo 		}
3576ca23e405STejun Heo 	}
3577ca23e405STejun Heo 	last_end = offsets[last_area] + sizes[last_area];
3578ca23e405STejun Heo 
3579ca23e405STejun Heo 	if (vmalloc_end - vmalloc_start < last_end) {
3580ca23e405STejun Heo 		WARN_ON(true);
3581ca23e405STejun Heo 		return NULL;
3582ca23e405STejun Heo 	}
3583ca23e405STejun Heo 
35844d67d860SThomas Meyer 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
35854d67d860SThomas Meyer 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3586ca23e405STejun Heo 	if (!vas || !vms)
3587f1db7afdSKautuk Consul 		goto err_free2;
3588ca23e405STejun Heo 
3589ca23e405STejun Heo 	for (area = 0; area < nr_vms; area++) {
359068ad4a33SUladzislau Rezki (Sony) 		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3591ec3f64fcSDavid Rientjes 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3592ca23e405STejun Heo 		if (!vas[area] || !vms[area])
3593ca23e405STejun Heo 			goto err_free;
3594ca23e405STejun Heo 	}
3595ca23e405STejun Heo retry:
3596e36176beSUladzislau Rezki (Sony) 	spin_lock(&free_vmap_area_lock);
3597ca23e405STejun Heo 
3598ca23e405STejun Heo 	/* start scanning - we scan from the top, begin with the last area */
3599ca23e405STejun Heo 	area = term_area = last_area;
3600ca23e405STejun Heo 	start = offsets[area];
3601ca23e405STejun Heo 	end = start + sizes[area];
3602ca23e405STejun Heo 
360368ad4a33SUladzislau Rezki (Sony) 	va = pvm_find_va_enclose_addr(vmalloc_end);
360468ad4a33SUladzislau Rezki (Sony) 	base = pvm_determine_end_from_reverse(&va, align) - end;
3605ca23e405STejun Heo 
3606ca23e405STejun Heo 	while (true) {
3607ca23e405STejun Heo 		/*
3608ca23e405STejun Heo 		 * base might have underflowed, add last_end before
3609ca23e405STejun Heo 		 * comparing.
3610ca23e405STejun Heo 		 */
361168ad4a33SUladzislau Rezki (Sony) 		if (base + last_end < vmalloc_start + last_end)
361268ad4a33SUladzislau Rezki (Sony) 			goto overflow;
3613ca23e405STejun Heo 
3614ca23e405STejun Heo 		/*
361568ad4a33SUladzislau Rezki (Sony) 		 * Fitting base has not been found.
3616ca23e405STejun Heo 		 */
361768ad4a33SUladzislau Rezki (Sony) 		if (va == NULL)
361868ad4a33SUladzislau Rezki (Sony) 			goto overflow;
3619ca23e405STejun Heo 
3620ca23e405STejun Heo 		/*
3621d8cc323dSQiujun Huang 		 * If required width exceeds current VA block, move
36225336e52cSKuppuswamy Sathyanarayanan 		 * base downwards and then recheck.
36235336e52cSKuppuswamy Sathyanarayanan 		 */
36245336e52cSKuppuswamy Sathyanarayanan 		if (base + end > va->va_end) {
36255336e52cSKuppuswamy Sathyanarayanan 			base = pvm_determine_end_from_reverse(&va, align) - end;
36265336e52cSKuppuswamy Sathyanarayanan 			term_area = area;
36275336e52cSKuppuswamy Sathyanarayanan 			continue;
36285336e52cSKuppuswamy Sathyanarayanan 		}
36295336e52cSKuppuswamy Sathyanarayanan 
36305336e52cSKuppuswamy Sathyanarayanan 		/*
363168ad4a33SUladzislau Rezki (Sony) 		 * If this VA does not fit, move base downwards and recheck.
3632ca23e405STejun Heo 		 */
36335336e52cSKuppuswamy Sathyanarayanan 		if (base + start < va->va_start) {
363468ad4a33SUladzislau Rezki (Sony) 			va = node_to_va(rb_prev(&va->rb_node));
363568ad4a33SUladzislau Rezki (Sony) 			base = pvm_determine_end_from_reverse(&va, align) - end;
3636ca23e405STejun Heo 			term_area = area;
3637ca23e405STejun Heo 			continue;
3638ca23e405STejun Heo 		}
3639ca23e405STejun Heo 
3640ca23e405STejun Heo 		/*
3641ca23e405STejun Heo 		 * This area fits, move on to the previous one. If
3642ca23e405STejun Heo 		 * the previous one is the terminal one, we're done.
3643ca23e405STejun Heo 		 */
3644ca23e405STejun Heo 		area = (area + nr_vms - 1) % nr_vms;
3645ca23e405STejun Heo 		if (area == term_area)
3646ca23e405STejun Heo 			break;
364768ad4a33SUladzislau Rezki (Sony) 
3648ca23e405STejun Heo 		start = offsets[area];
3649ca23e405STejun Heo 		end = start + sizes[area];
365068ad4a33SUladzislau Rezki (Sony) 		va = pvm_find_va_enclose_addr(base + end);
3651ca23e405STejun Heo 	}
365268ad4a33SUladzislau Rezki (Sony) 
3653ca23e405STejun Heo 	/* we've found a fitting base, insert all va's */
3654ca23e405STejun Heo 	for (area = 0; area < nr_vms; area++) {
365568ad4a33SUladzislau Rezki (Sony) 		int ret;
3656ca23e405STejun Heo 
365768ad4a33SUladzislau Rezki (Sony) 		start = base + offsets[area];
365868ad4a33SUladzislau Rezki (Sony) 		size = sizes[area];
365968ad4a33SUladzislau Rezki (Sony) 
366068ad4a33SUladzislau Rezki (Sony) 		va = pvm_find_va_enclose_addr(start);
366168ad4a33SUladzislau Rezki (Sony) 		if (WARN_ON_ONCE(va == NULL))
366268ad4a33SUladzislau Rezki (Sony) 			/* It is a BUG(), but trigger recovery instead. */
366368ad4a33SUladzislau Rezki (Sony) 			goto recovery;
366468ad4a33SUladzislau Rezki (Sony) 
366568ad4a33SUladzislau Rezki (Sony) 		type = classify_va_fit_type(va, start, size);
366668ad4a33SUladzislau Rezki (Sony) 		if (WARN_ON_ONCE(type == NOTHING_FIT))
366768ad4a33SUladzislau Rezki (Sony) 			/* It is a BUG(), but trigger recovery instead. */
366868ad4a33SUladzislau Rezki (Sony) 			goto recovery;
366968ad4a33SUladzislau Rezki (Sony) 
367068ad4a33SUladzislau Rezki (Sony) 		ret = adjust_va_to_fit_type(va, start, size, type);
367168ad4a33SUladzislau Rezki (Sony) 		if (unlikely(ret))
367268ad4a33SUladzislau Rezki (Sony) 			goto recovery;
367368ad4a33SUladzislau Rezki (Sony) 
367468ad4a33SUladzislau Rezki (Sony) 		/* Allocated area. 
*/
367568ad4a33SUladzislau Rezki (Sony) 		va = vas[area];
367668ad4a33SUladzislau Rezki (Sony) 		va->va_start = start;
367768ad4a33SUladzislau Rezki (Sony) 		va->va_end = start + size;
3678ca23e405STejun Heo 	}
3679ca23e405STejun Heo 
3680e36176beSUladzislau Rezki (Sony) 	spin_unlock(&free_vmap_area_lock);
3681ca23e405STejun Heo 
3682253a496dSDaniel Axtens 	/* populate the kasan shadow space */
3683253a496dSDaniel Axtens 	for (area = 0; area < nr_vms; area++) {
3684253a496dSDaniel Axtens 		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3685253a496dSDaniel Axtens 			goto err_free_shadow;
3686253a496dSDaniel Axtens 
3687253a496dSDaniel Axtens 		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3688253a496dSDaniel Axtens 				       sizes[area]);
3689253a496dSDaniel Axtens 	}
3690253a496dSDaniel Axtens 
3691ca23e405STejun Heo 	/* insert all vm's */
3692e36176beSUladzislau Rezki (Sony) 	spin_lock(&vmap_area_lock);
3693e36176beSUladzislau Rezki (Sony) 	for (area = 0; area < nr_vms; area++) {
3694e36176beSUladzislau Rezki (Sony) 		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3695e36176beSUladzislau Rezki (Sony) 
3696e36176beSUladzislau Rezki (Sony) 		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3697ca23e405STejun Heo 				 pcpu_get_vm_areas);
3698e36176beSUladzislau Rezki (Sony) 	}
3699e36176beSUladzislau Rezki (Sony) 	spin_unlock(&vmap_area_lock);
3700ca23e405STejun Heo 
3701ca23e405STejun Heo 	kfree(vas);
3702ca23e405STejun Heo 	return vms;
3703ca23e405STejun Heo 
370468ad4a33SUladzislau Rezki (Sony) recovery:
3705e36176beSUladzislau Rezki (Sony) 	/*
3706e36176beSUladzislau Rezki (Sony) 	 * Remove previously allocated areas. There is no
3707e36176beSUladzislau Rezki (Sony) 	 * need to remove these areas from the busy tree,
3708e36176beSUladzislau Rezki (Sony) 	 * because they are inserted only on the final step
3709e36176beSUladzislau Rezki (Sony) 	 * and only when pcpu_get_vm_areas() succeeds.
3710e36176beSUladzislau Rezki (Sony) 	 */
371168ad4a33SUladzislau Rezki (Sony) 	while (area--) {
3712253a496dSDaniel Axtens 		orig_start = vas[area]->va_start;
3713253a496dSDaniel Axtens 		orig_end = vas[area]->va_end;
371496e2db45SUladzislau Rezki (Sony) 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
37153c5c3cfbSDaniel Axtens 						    &free_vmap_area_list);
37169c801f61SUladzislau Rezki (Sony) 		if (va)
3717253a496dSDaniel Axtens 			kasan_release_vmalloc(orig_start, orig_end,
3718253a496dSDaniel Axtens 					      va->va_start, va->va_end);
371968ad4a33SUladzislau Rezki (Sony) 		vas[area] = NULL;
372068ad4a33SUladzislau Rezki (Sony) 	}
372168ad4a33SUladzislau Rezki (Sony) 
372268ad4a33SUladzislau Rezki (Sony) overflow:
3723e36176beSUladzislau Rezki (Sony) 	spin_unlock(&free_vmap_area_lock);
372468ad4a33SUladzislau Rezki (Sony) 	if (!purged) {
372568ad4a33SUladzislau Rezki (Sony) 		purge_vmap_area_lazy();
372668ad4a33SUladzislau Rezki (Sony) 		purged = true;
372768ad4a33SUladzislau Rezki (Sony) 
372868ad4a33SUladzislau Rezki (Sony) 		/* Before "retry", check if we recover. 
*/
372968ad4a33SUladzislau Rezki (Sony) 		for (area = 0; area < nr_vms; area++) {
373068ad4a33SUladzislau Rezki (Sony) 			if (vas[area])
373168ad4a33SUladzislau Rezki (Sony) 				continue;
373268ad4a33SUladzislau Rezki (Sony) 
373368ad4a33SUladzislau Rezki (Sony) 			vas[area] = kmem_cache_zalloc(
373468ad4a33SUladzislau Rezki (Sony) 				vmap_area_cachep, GFP_KERNEL);
373568ad4a33SUladzislau Rezki (Sony) 			if (!vas[area])
373668ad4a33SUladzislau Rezki (Sony) 				goto err_free;
373768ad4a33SUladzislau Rezki (Sony) 		}
373868ad4a33SUladzislau Rezki (Sony) 
373968ad4a33SUladzislau Rezki (Sony) 		goto retry;
374068ad4a33SUladzislau Rezki (Sony) 	}
374168ad4a33SUladzislau Rezki (Sony) 
3742ca23e405STejun Heo err_free:
3743ca23e405STejun Heo 	for (area = 0; area < nr_vms; area++) {
374468ad4a33SUladzislau Rezki (Sony) 		if (vas[area])
374568ad4a33SUladzislau Rezki (Sony) 			kmem_cache_free(vmap_area_cachep, vas[area]);
374668ad4a33SUladzislau Rezki (Sony) 
3747ca23e405STejun Heo 		kfree(vms[area]);
3748ca23e405STejun Heo 	}
3749f1db7afdSKautuk Consul err_free2:
3750ca23e405STejun Heo 	kfree(vas);
3751ca23e405STejun Heo 	kfree(vms);
3752ca23e405STejun Heo 	return NULL;
3753253a496dSDaniel Axtens 
3754253a496dSDaniel Axtens err_free_shadow:
3755253a496dSDaniel Axtens 	spin_lock(&free_vmap_area_lock);
3756253a496dSDaniel Axtens 	/*
3757253a496dSDaniel Axtens 	 * We release all the vmalloc shadows, even the ones for regions that
3758253a496dSDaniel Axtens 	 * hadn't been successfully added. This relies on kasan_release_vmalloc
3759253a496dSDaniel Axtens 	 * being able to tolerate this case.
3760253a496dSDaniel Axtens 	 */
3761253a496dSDaniel Axtens 	for (area = 0; area < nr_vms; area++) {
3762253a496dSDaniel Axtens 		orig_start = vas[area]->va_start;
3763253a496dSDaniel Axtens 		orig_end = vas[area]->va_end;
376496e2db45SUladzislau Rezki (Sony) 		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3765253a496dSDaniel Axtens 						    &free_vmap_area_list);
37669c801f61SUladzislau Rezki (Sony) 		if (va)
3767253a496dSDaniel Axtens 			kasan_release_vmalloc(orig_start, orig_end,
3768253a496dSDaniel Axtens 					      va->va_start, va->va_end);
3769253a496dSDaniel Axtens 		vas[area] = NULL;
3770253a496dSDaniel Axtens 		kfree(vms[area]);
3771253a496dSDaniel Axtens 	}
3772253a496dSDaniel Axtens 	spin_unlock(&free_vmap_area_lock);
3773253a496dSDaniel Axtens 	kfree(vas);
3774253a496dSDaniel Axtens 	kfree(vms);
3775253a496dSDaniel Axtens 	return NULL;
3776ca23e405STejun Heo }
3777ca23e405STejun Heo 
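/*
 * A minimal usage sketch (hypothetical numbers, not from this file): the
 * percpu first-chunk setup code is essentially the only caller. For two
 * NUMA groups whose chunks must sit at identical offsets from a common
 * base, a call would look roughly like this:
 */
#if 0
static struct vm_struct **example_percpu_areas(void)
{
	const unsigned long offsets[] = { 0, 1UL << 30 };	/* 1GiB apart */
	const size_t sizes[] = { PMD_SIZE, PMD_SIZE };

	/* offsets[] and sizes[] must all be aligned to the last argument. */
	return pcpu_get_vm_areas(offsets, sizes, 2, PMD_SIZE);
}
#endif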
3778ca23e405STejun Heo /**
3779ca23e405STejun Heo  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3780ca23e405STejun Heo  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3781ca23e405STejun Heo  * @nr_vms: the number of allocated areas
3782ca23e405STejun Heo  *
3783ca23e405STejun Heo  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3784ca23e405STejun Heo  */
3785ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3786ca23e405STejun Heo {
3787ca23e405STejun Heo 	int i;
3788ca23e405STejun Heo 
3789ca23e405STejun Heo 	for (i = 0; i < nr_vms; i++)
3790ca23e405STejun Heo 		free_vm_area(vms[i]);
3791ca23e405STejun Heo 	kfree(vms);
3792ca23e405STejun Heo }
37934f8b02b4STejun Heo #endif	/* CONFIG_SMP */
3794a10aa579SChristoph Lameter 
37955bb1bb35SPaul E. McKenney #ifdef CONFIG_PRINTK
379698f18083SPaul E. McKenney bool vmalloc_dump_obj(void *object)
379798f18083SPaul E. McKenney {
379898f18083SPaul E. McKenney 	struct vm_struct *vm;
379998f18083SPaul E. McKenney 	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
380098f18083SPaul E. McKenney 
380198f18083SPaul E. McKenney 	vm = find_vm_area(objp);
380298f18083SPaul E. McKenney 	if (!vm)
380398f18083SPaul E. McKenney 		return false;
3804bd34dcd4SPaul E. McKenney 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
3805bd34dcd4SPaul E. McKenney 		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
380698f18083SPaul E. McKenney 	return true;
380798f18083SPaul E. McKenney }
38085bb1bb35SPaul E. McKenney #endif
380998f18083SPaul E. McKenney 
3810a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS
3811a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos)
3812e36176beSUladzislau Rezki (Sony) 	__acquires(&vmap_purge_lock)
3813d4033afdSJoonsoo Kim 	__acquires(&vmap_area_lock)
3814a10aa579SChristoph Lameter {
3815e36176beSUladzislau Rezki (Sony) 	mutex_lock(&vmap_purge_lock);
3816d4033afdSJoonsoo Kim 	spin_lock(&vmap_area_lock);
3817e36176beSUladzislau Rezki (Sony) 
38183f500069Szijun_hu 	return seq_list_start(&vmap_area_list, *pos);
3819a10aa579SChristoph Lameter }
3820a10aa579SChristoph Lameter 
3821a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3822a10aa579SChristoph Lameter {
38233f500069Szijun_hu 	return seq_list_next(p, &vmap_area_list, pos);
3824a10aa579SChristoph Lameter }
3825a10aa579SChristoph Lameter 
3826a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p)
3827d4033afdSJoonsoo Kim 	__releases(&vmap_area_lock)
38280a7dd4e9SWaiman Long 	__releases(&vmap_purge_lock)
3829a10aa579SChristoph Lameter {
3830d4033afdSJoonsoo Kim 	spin_unlock(&vmap_area_lock);
38310a7dd4e9SWaiman Long 	mutex_unlock(&vmap_purge_lock);
3832a10aa579SChristoph Lameter }
3833a10aa579SChristoph Lameter 
3834a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3835a47a126aSEric Dumazet {
3836e5adfffcSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_NUMA)) {
3837a47a126aSEric Dumazet 		unsigned int nr, *counters = m->private;
3838a47a126aSEric Dumazet 
3839a47a126aSEric Dumazet 		if (!counters)
3840a47a126aSEric Dumazet 			return;
3841a47a126aSEric Dumazet 
3842af12346cSWanpeng Li 		if (v->flags & VM_UNINITIALIZED)
3843af12346cSWanpeng Li 			return;
38447e5b528bSDmitry Vyukov 		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
38457e5b528bSDmitry Vyukov 		smp_rmb();
3846af12346cSWanpeng Li 
3847a47a126aSEric Dumazet 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3848a47a126aSEric Dumazet 
3849a47a126aSEric Dumazet 		for (nr = 0; nr < v->nr_pages; nr++)
3850a47a126aSEric Dumazet 			counters[page_to_nid(v->pages[nr])]++;
3851a47a126aSEric Dumazet 
3852a47a126aSEric Dumazet 		for_each_node_state(nr, N_HIGH_MEMORY)
3853a47a126aSEric Dumazet 			if (counters[nr])
3854a47a126aSEric Dumazet 				seq_printf(m, " N%u=%u", nr, counters[nr]);
3855a47a126aSEric Dumazet 	}
3856a47a126aSEric Dumazet }
3857a47a126aSEric Dumazet 
3858dd3b8353SUladzislau Rezki (Sony) static void show_purge_info(struct seq_file *m)
3859dd3b8353SUladzislau Rezki (Sony) {
3860dd3b8353SUladzislau Rezki (Sony) 	struct vmap_area *va;
3861dd3b8353SUladzislau Rezki (Sony) 
386296e2db45SUladzislau Rezki (Sony) 	spin_lock(&purge_vmap_area_lock);
386396e2db45SUladzislau Rezki (Sony) 	list_for_each_entry(va, &purge_vmap_area_list, list) {
3864dd3b8353SUladzislau Rezki (Sony) 		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3865dd3b8353SUladzislau Rezki (Sony) 			(void *)va->va_start, (void *)va->va_end,
3866dd3b8353SUladzislau Rezki (Sony) 			va->va_end - va->va_start);
3867dd3b8353SUladzislau Rezki (Sony) 	}
386896e2db45SUladzislau Rezki (Sony) 	spin_unlock(&purge_vmap_area_lock);
3869dd3b8353SUladzislau Rezki (Sony) }
3870dd3b8353SUladzislau Rezki (Sony) 
3871a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p)
3872a10aa579SChristoph Lameter {
38733f500069Szijun_hu 	struct vmap_area *va;
3874d4033afdSJoonsoo Kim 	struct vm_struct *v;
3875d4033afdSJoonsoo Kim 
38763f500069Szijun_hu 	va = list_entry(p, struct vmap_area, list);
38773f500069Szijun_hu 
3878c2ce8c14SWanpeng Li 	/*
3879688fcbfcSPengfei Li 	 * s_show can encounter a race with remove_vm_area(): a NULL ->vm
3880688fcbfcSPengfei Li 	 * means the vmap area is being torn down or is a vm_map_ram allocation.
3881c2ce8c14SWanpeng Li 	 */
3882688fcbfcSPengfei Li 	if (!va->vm) {
3883dd3b8353SUladzislau Rezki (Sony) 		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
388478c72746SYisheng Xie 			(void *)va->va_start, (void *)va->va_end,
3885dd3b8353SUladzislau Rezki (Sony) 			va->va_end - va->va_start);
388678c72746SYisheng Xie 
3887d4033afdSJoonsoo Kim 		return 0;
388878c72746SYisheng Xie 	}
3889d4033afdSJoonsoo Kim 
3890d4033afdSJoonsoo Kim 	v = va->vm;
3891a10aa579SChristoph Lameter 
389245ec1690SKees Cook 	seq_printf(m, "0x%pK-0x%pK %7ld",
3893a10aa579SChristoph Lameter 		v->addr, v->addr + v->size, v->size);
3894a10aa579SChristoph Lameter 
389562c70bceSJoe Perches 	if (v->caller)
389662c70bceSJoe Perches 		seq_printf(m, " %pS", v->caller);
389723016969SChristoph Lameter 
3898a10aa579SChristoph Lameter 	if (v->nr_pages)
3899a10aa579SChristoph Lameter 		seq_printf(m, " pages=%d", v->nr_pages);
3900a10aa579SChristoph Lameter 
3901a10aa579SChristoph Lameter 	if (v->phys_addr)
3902199eaa05SMiles Chen 		seq_printf(m, " phys=%pa", &v->phys_addr);
3903a10aa579SChristoph Lameter 
3904a10aa579SChristoph Lameter 	if (v->flags & VM_IOREMAP)
3905f4527c90SFabian Frederick 		seq_puts(m, " ioremap");
3906a10aa579SChristoph Lameter 
3907a10aa579SChristoph Lameter 	if (v->flags & VM_ALLOC)
3908f4527c90SFabian Frederick 		seq_puts(m, " vmalloc");
3909a10aa579SChristoph Lameter 
3910a10aa579SChristoph Lameter 	if (v->flags & VM_MAP)
3911f4527c90SFabian Frederick 		seq_puts(m, " vmap");
3912a10aa579SChristoph Lameter 
3913a10aa579SChristoph Lameter 	if (v->flags & VM_USERMAP)
3914f4527c90SFabian Frederick 		seq_puts(m, " user");
3915a10aa579SChristoph Lameter 
3916fe9041c2SChristoph Hellwig 	if (v->flags & VM_DMA_COHERENT)
3917fe9041c2SChristoph Hellwig 		seq_puts(m, " dma-coherent");
3918fe9041c2SChristoph Hellwig 
3919244d63eeSDavid Rientjes 	if (is_vmalloc_addr(v->pages))
3920f4527c90SFabian Frederick 		seq_puts(m, " vpages");
3921a10aa579SChristoph Lameter 
3922a47a126aSEric Dumazet 	show_numa_info(m, v);
3923a10aa579SChristoph Lameter 	seq_putc(m, '\n');
3924dd3b8353SUladzislau Rezki (Sony) 
3925dd3b8353SUladzislau Rezki (Sony) 	/*
392696e2db45SUladzislau Rezki (Sony) 	 * As a final step, dump "unpurged" areas.
3927dd3b8353SUladzislau Rezki (Sony) 	 */
3928dd3b8353SUladzislau Rezki (Sony) 	if (list_is_last(&va->list, &vmap_area_list))
3929dd3b8353SUladzislau Rezki (Sony) 		show_purge_info(m);
3930dd3b8353SUladzislau Rezki (Sony) 
3931a10aa579SChristoph Lameter 	return 0;
3932a10aa579SChristoph Lameter }
3933a10aa579SChristoph Lameter 
39345f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = {
3935a10aa579SChristoph Lameter 	.start = s_start,
3936a10aa579SChristoph Lameter 	.next = s_next,
3937a10aa579SChristoph Lameter 	.stop = s_stop,
3938a10aa579SChristoph Lameter 	.show = s_show,
3939a10aa579SChristoph Lameter };
39405f6a6a9cSAlexey Dobriyan 
39415f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void)
39425f6a6a9cSAlexey Dobriyan {
3943fddda2b7SChristoph Hellwig 	if (IS_ENABLED(CONFIG_NUMA))
39440825a6f9SJoe Perches 		proc_create_seq_private("vmallocinfo", 0400, NULL,
394544414d82SChristoph Hellwig 				&vmalloc_op,
394644414d82SChristoph Hellwig 				nr_node_ids * sizeof(unsigned int), NULL);
3947fddda2b7SChristoph Hellwig 	else
39480825a6f9SJoe Perches 		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
39495f6a6a9cSAlexey Dobriyan 	return 0;
39505f6a6a9cSAlexey Dobriyan }
39515f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init);
3952db3808c1SJoonsoo Kim 
3953a10aa579SChristoph Lameter #endif
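/*
 * Illustrative /proc/vmallocinfo output in the format produced by
 * s_show() above (the entries themselves are hypothetical, and %pK
 * hashes the addresses for unprivileged readers):
 *
 *	0x(____ptrval____)-0x(____ptrval____)   20480 example_fn+0x32/0xa0 pages=4 vmalloc N0=4
 *	0x(____ptrval____)-0x(____ptrval____)    8192 vm_map_ram
 *	0x(____ptrval____)-0x(____ptrval____)  131072 unpurged vm_area
 */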