// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"
#include "pgalloc-track.h"

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}
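
/*
 * Illustrative sketch only (the actual call sites live outside this excerpt):
 * a vfree() issued from a context that must not sleep is expected to defer
 * the real unmap to free_work() above, roughly along these lines:
 *
 *	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
 *
 *	if (llist_add((struct llist_node *)addr, &p->list))
 *		schedule_work(&p->wq);
 */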

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;
	int cleared;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		cleared = p4d_clear_huge(p4d);
		if (cleared || p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (cleared)
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @start: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @start.  The VM area @start and @size specify
 * should have been allocated using get_vm_area() and its friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is responsible
 * for calling flush_cache_vunmap() on to-be-mapped areas before calling this
 * function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size specify should
 * have been allocated using get_vm_area() and its friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is responsible for
 * calling flush_cache_vmap() on to-be-mapped areas before calling this
 * function.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	unsigned long end = addr + size;
	unsigned long next;
	pgd_t *pgd;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages)
{
	int ret;

	ret = map_kernel_range_noflush(start, size, prot, pages);
	flush_cache_vmap(start, start + size);
	return ret;
}
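
/*
 * Illustrative sketch only (not taken from this file): a typical caller
 * pairs the two mapping helpers above with the VM area allocator, e.g.
 * (error handling omitted):
 *
 *	struct vm_struct *area = get_vm_area(size, VM_MAP);
 *
 *	if (area)
 *		err = map_kernel_range((unsigned long)area->addr,
 *				       get_vm_area_size(area), PAGE_KERNEL, pages);
 */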
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
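
/*
 * Illustrative usage only (not from this file): callers that need to walk a
 * vmalloc() buffer one page at a time typically do something like:
 *
 *	for (i = 0; i < nr_pages; i++)
 *		pages[i] = vmalloc_to_page(buf + i * PAGE_SIZE);
 */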

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when the node is removed or rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}
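
/*
 * Illustrative only: __find_vmap_area() walks vmap_area_root, so it is
 * expected to be called with vmap_area_lock held, roughly:
 *
 *	spin_lock(&vmap_area_lock);
 *	va = __find_vmap_area(addr);
 *	spin_unlock(&vmap_area_lock);
 */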

/*
 * This function returns back addresses of parent node
 * and its left or right link for further processing.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and correct direction, we
	 * name it link, where the new va->rb_node will be attached to.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger the BUG() if there are side (left/right)
		 * or full overlaps.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else
			BUG();
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * This is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(struct rb_node *n)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long size;
	bool found = false;

	if (n == NULL)
		return;

	va = rb_entry(n, struct vmap_area, rb_node);
	size = va->subtree_max_size;
	node = n;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) == size) {
			node = node->rb_left;
		} else {
			if (va_size(va) == size) {
				found = true;
				break;
			}

			node = node->rb_right;
		}
	}

	if (!found) {
		va = rb_entry(n, struct vmap_area, rb_node);
		pr_emerg("tree is corrupted: %lu, %lu\n",
			va_size(va), va->subtree_max_size);
	}

	augment_tree_propagate_check(n->rb_left);
	augment_tree_propagate_check(n->rb_right);
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end, or
 * when a new VA is inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree (free path);
 * - After VA has been shrunk (allocation path);
 * - After VA has been increased (merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example, if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1,
 * only its subtree_max_size is updated and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	struct rb_node *node = &va->rb_node;
	unsigned long new_va_sub_max_size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);
		new_va_sub_max_size = compute_subtree_max_size(va);

		/*
		 * If the newly calculated maximum available size of the
		 * subtree is equal to the current one, then it means that
		 * the tree is propagated correctly. So we have to stop at
		 * this point to save cycles.
		 */
		if (va->subtree_max_size == new_va_sub_max_size)
			break;

		va->subtree_max_size = new_va_sub_max_size;
		node = rb_parent(&va->rb_node);
	}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check(free_vmap_area_root.rb_node);
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	link_va(va, root, parent, link, head);
	augment_tree_propagate_from(va);
}

/*
 * Merge a de-allocated chunk of VA memory with previous
 * and next free blocks. If coalescing is not done, a new
 * free area is inserted. If the VA has been merged, it is
 * freed.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			sibling->va_end = va->va_end;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			if (merged)
				unlink_va(va, root);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}
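
/*
 * Worked example (illustrative): with size = 2 * PAGE_SIZE, align = PAGE_SIZE
 * and vstart = VMALLOC_START, a free block spanning
 * [VMALLOC_START, VMALLOC_START + 3 * PAGE_SIZE) satisfies the check above:
 * nva_start_addr aligns to VMALLOC_START and nva_start_addr + size still
 * fits below va->va_end.
 */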

/*
 * Find the first free block (lowest start address) in the tree
 * that can accomplish the request corresponding to the passed
 * parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree,
			 * that will satisfy the search criteria. It can happen
			 * only once due to "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
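
/*
 * Worked example (illustrative): carving the first page out of a free block
 * [0x1000, 0x5000) is LE_FIT_TYPE, taking the whole block is FL_FIT_TYPE,
 * taking the last page is RE_FIT_TYPE, and taking [0x2000, 0x3000) out of
 * the middle is NE_FIT_TYPE, which needs the extra "lva" object below.
 */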

static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is it most likely
			 * never ends up with NE_FIT_TYPE splitting. In case of
In case of 104982dd23e8SUladzislau Rezki (Sony) * percpu allocations offsets and sizes are aligned to 105082dd23e8SUladzislau Rezki (Sony) * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE 105182dd23e8SUladzislau Rezki (Sony) * are its main fitting cases. 105282dd23e8SUladzislau Rezki (Sony) * 105382dd23e8SUladzislau Rezki (Sony) * There are a few exceptions though, as an example it is 105482dd23e8SUladzislau Rezki (Sony) * the first allocation (early boot up) when we have "one" 105582dd23e8SUladzislau Rezki (Sony) * big free space that has to be split. 1056060650a2SUladzislau Rezki (Sony) * 1057060650a2SUladzislau Rezki (Sony) * Also we can hit this path in case of regular "vmap" 1058060650a2SUladzislau Rezki (Sony) * allocations, if "this" current CPU was not preloaded. 1059060650a2SUladzislau Rezki (Sony) * See the comment in alloc_vmap_area() for why. If so, then 1060060650a2SUladzislau Rezki (Sony) * GFP_NOWAIT is used instead to get an extra object for 1061060650a2SUladzislau Rezki (Sony) * split purpose. That is rare and most of the time does not 1062060650a2SUladzislau Rezki (Sony) * occur. 1063060650a2SUladzislau Rezki (Sony) * 1064060650a2SUladzislau Rezki (Sony) * What happens if an allocation fails? Basically, 1065060650a2SUladzislau Rezki (Sony) * an "overflow" path is triggered to purge lazily freed 1066060650a2SUladzislau Rezki (Sony) * areas to free some memory, then, the "retry" path is 1067060650a2SUladzislau Rezki (Sony) * triggered to repeat one more time. See more details 1068060650a2SUladzislau Rezki (Sony) * in the alloc_vmap_area() function. 106982dd23e8SUladzislau Rezki (Sony) */ 107068ad4a33SUladzislau Rezki (Sony) lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); 107182dd23e8SUladzislau Rezki (Sony) if (!lva) 107268ad4a33SUladzislau Rezki (Sony) return -1; 107382dd23e8SUladzislau Rezki (Sony) } 107468ad4a33SUladzislau Rezki (Sony) 107568ad4a33SUladzislau Rezki (Sony) /* 107668ad4a33SUladzislau Rezki (Sony) * Build the remainder. 107768ad4a33SUladzislau Rezki (Sony) */ 107868ad4a33SUladzislau Rezki (Sony) lva->va_start = va->va_start; 107968ad4a33SUladzislau Rezki (Sony) lva->va_end = nva_start_addr; 108068ad4a33SUladzislau Rezki (Sony) 108168ad4a33SUladzislau Rezki (Sony) /* 108268ad4a33SUladzislau Rezki (Sony) * Shrink this VA to remaining size. 108368ad4a33SUladzislau Rezki (Sony) */ 108468ad4a33SUladzislau Rezki (Sony) va->va_start = nva_start_addr + size; 108568ad4a33SUladzislau Rezki (Sony) } else { 108668ad4a33SUladzislau Rezki (Sony) return -1; 108768ad4a33SUladzislau Rezki (Sony) } 108868ad4a33SUladzislau Rezki (Sony) 108968ad4a33SUladzislau Rezki (Sony) if (type != FL_FIT_TYPE) { 109068ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va); 109168ad4a33SUladzislau Rezki (Sony) 10922c929233SArnd Bergmann if (lva) /* type == NE_FIT_TYPE */ 109368ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(lva, &va->rb_node, 109468ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, &free_vmap_area_list); 109568ad4a33SUladzislau Rezki (Sony) } 109668ad4a33SUladzislau Rezki (Sony) 109768ad4a33SUladzislau Rezki (Sony) return 0; 109868ad4a33SUladzislau Rezki (Sony) } 109968ad4a33SUladzislau Rezki (Sony) 110068ad4a33SUladzislau Rezki (Sony) /* 110168ad4a33SUladzislau Rezki (Sony) * Returns a start address of the newly allocated area on success. 110268ad4a33SUladzislau Rezki (Sony) * Otherwise a vend is returned that indicates failure.
110368ad4a33SUladzislau Rezki (Sony) */ 110468ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long 110568ad4a33SUladzislau Rezki (Sony) __alloc_vmap_area(unsigned long size, unsigned long align, 1106cacca6baSUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend) 110768ad4a33SUladzislau Rezki (Sony) { 110868ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr; 110968ad4a33SUladzislau Rezki (Sony) struct vmap_area *va; 111068ad4a33SUladzislau Rezki (Sony) enum fit_type type; 111168ad4a33SUladzislau Rezki (Sony) int ret; 111268ad4a33SUladzislau Rezki (Sony) 111368ad4a33SUladzislau Rezki (Sony) va = find_vmap_lowest_match(size, align, vstart); 111468ad4a33SUladzislau Rezki (Sony) if (unlikely(!va)) 111568ad4a33SUladzislau Rezki (Sony) return vend; 111668ad4a33SUladzislau Rezki (Sony) 111768ad4a33SUladzislau Rezki (Sony) if (va->va_start > vstart) 111868ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align); 111968ad4a33SUladzislau Rezki (Sony) else 112068ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align); 112168ad4a33SUladzislau Rezki (Sony) 112268ad4a33SUladzislau Rezki (Sony) /* Check the "vend" restriction. */ 112368ad4a33SUladzislau Rezki (Sony) if (nva_start_addr + size > vend) 112468ad4a33SUladzislau Rezki (Sony) return vend; 112568ad4a33SUladzislau Rezki (Sony) 112668ad4a33SUladzislau Rezki (Sony) /* Classify what we have found. */ 112768ad4a33SUladzislau Rezki (Sony) type = classify_va_fit_type(va, nva_start_addr, size); 112868ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(type == NOTHING_FIT)) 112968ad4a33SUladzislau Rezki (Sony) return vend; 113068ad4a33SUladzislau Rezki (Sony) 113168ad4a33SUladzislau Rezki (Sony) /* Update the free vmap_area. */ 113268ad4a33SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(va, nva_start_addr, size, type); 113368ad4a33SUladzislau Rezki (Sony) if (ret) 113468ad4a33SUladzislau Rezki (Sony) return vend; 113568ad4a33SUladzislau Rezki (Sony) 1136a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1137a6cf4e0fSUladzislau Rezki (Sony) find_vmap_lowest_match_check(size); 1138a6cf4e0fSUladzislau Rezki (Sony) #endif 1139a6cf4e0fSUladzislau Rezki (Sony) 114068ad4a33SUladzislau Rezki (Sony) return nva_start_addr; 114168ad4a33SUladzislau Rezki (Sony) } 11424da56b99SChris Wilson 1143db64fe02SNick Piggin /* 1144d98c9e83SAndrey Ryabinin * Free a region of KVA allocated by alloc_vmap_area 1145d98c9e83SAndrey Ryabinin */ 1146d98c9e83SAndrey Ryabinin static void free_vmap_area(struct vmap_area *va) 1147d98c9e83SAndrey Ryabinin { 1148d98c9e83SAndrey Ryabinin /* 1149d98c9e83SAndrey Ryabinin * Remove from the busy tree/list. 1150d98c9e83SAndrey Ryabinin */ 1151d98c9e83SAndrey Ryabinin spin_lock(&vmap_area_lock); 1152d98c9e83SAndrey Ryabinin unlink_va(va, &vmap_area_root); 1153d98c9e83SAndrey Ryabinin spin_unlock(&vmap_area_lock); 1154d98c9e83SAndrey Ryabinin 1155d98c9e83SAndrey Ryabinin /* 1156d98c9e83SAndrey Ryabinin * Insert/Merge it back to the free tree/list. 1157d98c9e83SAndrey Ryabinin */ 1158d98c9e83SAndrey Ryabinin spin_lock(&free_vmap_area_lock); 1159d98c9e83SAndrey Ryabinin merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list); 1160d98c9e83SAndrey Ryabinin spin_unlock(&free_vmap_area_lock); 1161d98c9e83SAndrey Ryabinin } 1162d98c9e83SAndrey Ryabinin 1163d98c9e83SAndrey Ryabinin /* 1164db64fe02SNick Piggin * Allocate a region of KVA of the specified size and alignment, within the 1165db64fe02SNick Piggin * vstart and vend. 
1166db64fe02SNick Piggin */ 1167db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size, 1168db64fe02SNick Piggin unsigned long align, 1169db64fe02SNick Piggin unsigned long vstart, unsigned long vend, 1170db64fe02SNick Piggin int node, gfp_t gfp_mask) 1171db64fe02SNick Piggin { 117282dd23e8SUladzislau Rezki (Sony) struct vmap_area *va, *pva; 11731da177e4SLinus Torvalds unsigned long addr; 1174db64fe02SNick Piggin int purged = 0; 1175d98c9e83SAndrey Ryabinin int ret; 1176db64fe02SNick Piggin 11777766970cSNick Piggin BUG_ON(!size); 1178891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 117989699605SNick Piggin BUG_ON(!is_power_of_2(align)); 1180db64fe02SNick Piggin 118168ad4a33SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized)) 118268ad4a33SUladzislau Rezki (Sony) return ERR_PTR(-EBUSY); 118368ad4a33SUladzislau Rezki (Sony) 11845803ed29SChristoph Hellwig might_sleep(); 1185f07116d7SUladzislau Rezki (Sony) gfp_mask = gfp_mask & GFP_RECLAIM_MASK; 11864da56b99SChris Wilson 1187f07116d7SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1188db64fe02SNick Piggin if (unlikely(!va)) 1189db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 1190db64fe02SNick Piggin 11917f88f88fSCatalin Marinas /* 11927f88f88fSCatalin Marinas * Only scan the relevant parts containing pointers to other objects 11937f88f88fSCatalin Marinas * to avoid false negatives. 11947f88f88fSCatalin Marinas */ 1195f07116d7SUladzislau Rezki (Sony) kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); 11967f88f88fSCatalin Marinas 1197db64fe02SNick Piggin retry: 119882dd23e8SUladzislau Rezki (Sony) /* 119981f1ba58SUladzislau Rezki (Sony) * Preload this CPU with one extra vmap_area object. It is used 120081f1ba58SUladzislau Rezki (Sony) * when fit type of free area is NE_FIT_TYPE. Please note, it 120181f1ba58SUladzislau Rezki (Sony) * does not guarantee that an allocation occurs on a CPU that 120281f1ba58SUladzislau Rezki (Sony) * is preloaded, instead we minimize the case when it is not. 120381f1ba58SUladzislau Rezki (Sony) * It can happen because of cpu migration, because there is a 120481f1ba58SUladzislau Rezki (Sony) * race until the below spinlock is taken. 120582dd23e8SUladzislau Rezki (Sony) * 120682dd23e8SUladzislau Rezki (Sony) * The preload is done in non-atomic context, thus it allows us 120782dd23e8SUladzislau Rezki (Sony) * to use more permissive allocation masks to be more stable under 120881f1ba58SUladzislau Rezki (Sony) * low memory condition and high memory pressure. In rare case, 120981f1ba58SUladzislau Rezki (Sony) * if not preloaded, GFP_NOWAIT is used. 121082dd23e8SUladzislau Rezki (Sony) * 121181f1ba58SUladzislau Rezki (Sony) * Set "pva" to NULL here, because of "retry" path. 121282dd23e8SUladzislau Rezki (Sony) */ 121381f1ba58SUladzislau Rezki (Sony) pva = NULL; 121482dd23e8SUladzislau Rezki (Sony) 121581f1ba58SUladzislau Rezki (Sony) if (!this_cpu_read(ne_fit_preload_node)) 121681f1ba58SUladzislau Rezki (Sony) /* 121781f1ba58SUladzislau Rezki (Sony) * Even if it fails we do not really care about that. 121881f1ba58SUladzislau Rezki (Sony) * Just proceed as it is. If needed "overflow" path 121981f1ba58SUladzislau Rezki (Sony) * will refill the cache we allocate from. 
122081f1ba58SUladzislau Rezki (Sony) */ 1221f07116d7SUladzislau Rezki (Sony) pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 122282dd23e8SUladzislau Rezki (Sony) 1223e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 122481f1ba58SUladzislau Rezki (Sony) 122581f1ba58SUladzislau Rezki (Sony) if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) 122681f1ba58SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, pva); 122768ad4a33SUladzislau Rezki (Sony) 122889699605SNick Piggin /* 122968ad4a33SUladzislau Rezki (Sony) * If an allocation fails, the "vend" address is 123068ad4a33SUladzislau Rezki (Sony) * returned. Therefore trigger the overflow path. 123189699605SNick Piggin */ 1232cacca6baSUladzislau Rezki (Sony) addr = __alloc_vmap_area(size, align, vstart, vend); 1233e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 1234e36176beSUladzislau Rezki (Sony) 123568ad4a33SUladzislau Rezki (Sony) if (unlikely(addr == vend)) 123689699605SNick Piggin goto overflow; 123789699605SNick Piggin 123889699605SNick Piggin va->va_start = addr; 123989699605SNick Piggin va->va_end = addr + size; 1240688fcbfcSPengfei Li va->vm = NULL; 124168ad4a33SUladzislau Rezki (Sony) 1242d98c9e83SAndrey Ryabinin 1243e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 1244e36176beSUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 124589699605SNick Piggin spin_unlock(&vmap_area_lock); 124689699605SNick Piggin 124761e16557SWang Xiaoqiang BUG_ON(!IS_ALIGNED(va->va_start, align)); 124889699605SNick Piggin BUG_ON(va->va_start < vstart); 124989699605SNick Piggin BUG_ON(va->va_end > vend); 125089699605SNick Piggin 1251d98c9e83SAndrey Ryabinin ret = kasan_populate_vmalloc(addr, size); 1252d98c9e83SAndrey Ryabinin if (ret) { 1253d98c9e83SAndrey Ryabinin free_vmap_area(va); 1254d98c9e83SAndrey Ryabinin return ERR_PTR(ret); 1255d98c9e83SAndrey Ryabinin } 1256d98c9e83SAndrey Ryabinin 125789699605SNick Piggin return va; 125889699605SNick Piggin 12597766970cSNick Piggin overflow: 1260db64fe02SNick Piggin if (!purged) { 1261db64fe02SNick Piggin purge_vmap_area_lazy(); 1262db64fe02SNick Piggin purged = 1; 1263db64fe02SNick Piggin goto retry; 1264db64fe02SNick Piggin } 12654da56b99SChris Wilson 12664da56b99SChris Wilson if (gfpflags_allow_blocking(gfp_mask)) { 12674da56b99SChris Wilson unsigned long freed = 0; 12684da56b99SChris Wilson blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 12694da56b99SChris Wilson if (freed > 0) { 12704da56b99SChris Wilson purged = 0; 12714da56b99SChris Wilson goto retry; 12724da56b99SChris Wilson } 12734da56b99SChris Wilson } 12744da56b99SChris Wilson 127503497d76SFlorian Fainelli if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 1276756a025fSJoe Perches pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 1277756a025fSJoe Perches size); 127868ad4a33SUladzislau Rezki (Sony) 127968ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 1280db64fe02SNick Piggin return ERR_PTR(-EBUSY); 1281db64fe02SNick Piggin } 1282db64fe02SNick Piggin 12834da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb) 12844da56b99SChris Wilson { 12854da56b99SChris Wilson return blocking_notifier_chain_register(&vmap_notify_list, nb); 12864da56b99SChris Wilson } 12874da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 12884da56b99SChris Wilson 12894da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb) 
12904da56b99SChris Wilson { 12914da56b99SChris Wilson return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 12924da56b99SChris Wilson } 12934da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 12944da56b99SChris Wilson 1295db64fe02SNick Piggin /* 1296db64fe02SNick Piggin * lazy_max_pages is the maximum amount of virtual address space we gather up 1297db64fe02SNick Piggin * before attempting to purge with a TLB flush. 1298db64fe02SNick Piggin * 1299db64fe02SNick Piggin * There is a tradeoff here: a larger number will cover more kernel page tables 1300db64fe02SNick Piggin * and take slightly longer to purge, but it will linearly reduce the number of 1301db64fe02SNick Piggin * global TLB flushes that must be performed. It would seem natural to scale 1302db64fe02SNick Piggin * this number up linearly with the number of CPUs (because vmapping activity 1303db64fe02SNick Piggin * could also scale linearly with the number of CPUs), however it is likely 1304db64fe02SNick Piggin * that in practice, workloads might be constrained in other ways that mean 1305db64fe02SNick Piggin * vmap activity will not scale linearly with CPUs. Also, I want to be 1306db64fe02SNick Piggin * conservative and not introduce a big latency on huge systems, so go with 1307db64fe02SNick Piggin * a less aggressive log scale. It will still be an improvement over the old 1308db64fe02SNick Piggin * code, and it will be simple to change the scale factor if we find that it 1309db64fe02SNick Piggin * becomes a problem on bigger systems. 1310db64fe02SNick Piggin */ 1311db64fe02SNick Piggin static unsigned long lazy_max_pages(void) 1312db64fe02SNick Piggin { 1313db64fe02SNick Piggin unsigned int log; 1314db64fe02SNick Piggin 1315db64fe02SNick Piggin log = fls(num_online_cpus()); 1316db64fe02SNick Piggin 1317db64fe02SNick Piggin return log * (32UL * 1024 * 1024 / PAGE_SIZE); 1318db64fe02SNick Piggin } 1319db64fe02SNick Piggin 13204d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); 1321db64fe02SNick Piggin 13220574ecd1SChristoph Hellwig /* 13230574ecd1SChristoph Hellwig * Serialize vmap purging. There is no actual critical section protected 13240574ecd1SChristoph Hellwig * by this lock, but we want to avoid concurrent calls for performance 13250574ecd1SChristoph Hellwig * reasons and to make the pcpu_get_vm_areas more deterministic. 13260574ecd1SChristoph Hellwig */ 1327f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock); 13280574ecd1SChristoph Hellwig 132902b709dfSNick Piggin /* for per-CPU blocks */ 133002b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void); 133102b709dfSNick Piggin 1332db64fe02SNick Piggin /* 13333ee48b6aSCliff Wickman * called before a call to iounmap() if the caller wants vm_area_struct's 13343ee48b6aSCliff Wickman * immediately freed. 13353ee48b6aSCliff Wickman */ 13363ee48b6aSCliff Wickman void set_iounmap_nonlazy(void) 13373ee48b6aSCliff Wickman { 13384d36e6f8SUladzislau Rezki (Sony) atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1); 13393ee48b6aSCliff Wickman } 13403ee48b6aSCliff Wickman 13413ee48b6aSCliff Wickman /* 1342db64fe02SNick Piggin * Purges all lazily-freed vmap areas.
1343db64fe02SNick Piggin */ 13440574ecd1SChristoph Hellwig static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) 1345db64fe02SNick Piggin { 13464d36e6f8SUladzislau Rezki (Sony) unsigned long resched_threshold; 134780c4bd7aSChris Wilson struct llist_node *valist; 1348db64fe02SNick Piggin struct vmap_area *va; 1349cbb76676SVegard Nossum struct vmap_area *n_va; 1350db64fe02SNick Piggin 13510574ecd1SChristoph Hellwig lockdep_assert_held(&vmap_purge_lock); 135202b709dfSNick Piggin 135380c4bd7aSChris Wilson valist = llist_del_all(&vmap_purge_list); 135468571be9SUladzislau Rezki (Sony) if (unlikely(valist == NULL)) 135568571be9SUladzislau Rezki (Sony) return false; 135668571be9SUladzislau Rezki (Sony) 135768571be9SUladzislau Rezki (Sony) /* 135868571be9SUladzislau Rezki (Sony) * TODO: to calculate a flush range without looping. 135968571be9SUladzislau Rezki (Sony) * The list can be up to lazy_max_pages() elements. 136068571be9SUladzislau Rezki (Sony) */ 136180c4bd7aSChris Wilson llist_for_each_entry(va, valist, purge_list) { 13620574ecd1SChristoph Hellwig if (va->va_start < start) 13630574ecd1SChristoph Hellwig start = va->va_start; 13640574ecd1SChristoph Hellwig if (va->va_end > end) 13650574ecd1SChristoph Hellwig end = va->va_end; 1366db64fe02SNick Piggin } 1367db64fe02SNick Piggin 13680574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 13694d36e6f8SUladzislau Rezki (Sony) resched_threshold = lazy_max_pages() << 1; 1370db64fe02SNick Piggin 1371e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 1372763b218dSJoel Fernandes llist_for_each_entry_safe(va, n_va, valist, purge_list) { 13734d36e6f8SUladzislau Rezki (Sony) unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; 13743c5c3cfbSDaniel Axtens unsigned long orig_start = va->va_start; 13753c5c3cfbSDaniel Axtens unsigned long orig_end = va->va_end; 1376763b218dSJoel Fernandes 1377dd3b8353SUladzislau Rezki (Sony) /* 1378dd3b8353SUladzislau Rezki (Sony) * Finally insert or merge lazily-freed area. It is 1379dd3b8353SUladzislau Rezki (Sony) * detached and there is no need to "unlink" it from 1380dd3b8353SUladzislau Rezki (Sony) * anything. 1381dd3b8353SUladzislau Rezki (Sony) */ 13823c5c3cfbSDaniel Axtens va = merge_or_add_vmap_area(va, &free_vmap_area_root, 13833c5c3cfbSDaniel Axtens &free_vmap_area_list); 13843c5c3cfbSDaniel Axtens 13853c5c3cfbSDaniel Axtens if (is_vmalloc_or_module_addr((void *)orig_start)) 13863c5c3cfbSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 13873c5c3cfbSDaniel Axtens va->va_start, va->va_end); 1388dd3b8353SUladzislau Rezki (Sony) 13894d36e6f8SUladzislau Rezki (Sony) atomic_long_sub(nr, &vmap_lazy_nr); 139068571be9SUladzislau Rezki (Sony) 13914d36e6f8SUladzislau Rezki (Sony) if (atomic_long_read(&vmap_lazy_nr) < resched_threshold) 1392e36176beSUladzislau Rezki (Sony) cond_resched_lock(&free_vmap_area_lock); 1393763b218dSJoel Fernandes } 1394e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 13950574ecd1SChristoph Hellwig return true; 1396db64fe02SNick Piggin } 1397db64fe02SNick Piggin 1398db64fe02SNick Piggin /* 1399496850e5SNick Piggin * Kick off a purge of the outstanding lazy areas. Don't bother if somebody 1400496850e5SNick Piggin * is already purging. 
1401496850e5SNick Piggin */ 1402496850e5SNick Piggin static void try_purge_vmap_area_lazy(void) 1403496850e5SNick Piggin { 1404f9e09977SChristoph Hellwig if (mutex_trylock(&vmap_purge_lock)) { 14050574ecd1SChristoph Hellwig __purge_vmap_area_lazy(ULONG_MAX, 0); 1406f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 14070574ecd1SChristoph Hellwig } 1408496850e5SNick Piggin } 1409496850e5SNick Piggin 1410496850e5SNick Piggin /* 1411db64fe02SNick Piggin * Kick off a purge of the outstanding lazy areas. 1412db64fe02SNick Piggin */ 1413db64fe02SNick Piggin static void purge_vmap_area_lazy(void) 1414db64fe02SNick Piggin { 1415f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 14160574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 14170574ecd1SChristoph Hellwig __purge_vmap_area_lazy(ULONG_MAX, 0); 1418f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 1419db64fe02SNick Piggin } 1420db64fe02SNick Piggin 1421db64fe02SNick Piggin /* 142264141da5SJeremy Fitzhardinge * Free a vmap area, caller ensuring that the area has been unmapped 142364141da5SJeremy Fitzhardinge * and flush_cache_vunmap had been called for the correct range 142464141da5SJeremy Fitzhardinge * previously. 1425db64fe02SNick Piggin */ 142664141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va) 1427db64fe02SNick Piggin { 14284d36e6f8SUladzislau Rezki (Sony) unsigned long nr_lazy; 142980c4bd7aSChris Wilson 1430dd3b8353SUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 1431dd3b8353SUladzislau Rezki (Sony) unlink_va(va, &vmap_area_root); 1432dd3b8353SUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 1433dd3b8353SUladzislau Rezki (Sony) 14344d36e6f8SUladzislau Rezki (Sony) nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> 14354d36e6f8SUladzislau Rezki (Sony) PAGE_SHIFT, &vmap_lazy_nr); 143680c4bd7aSChris Wilson 143780c4bd7aSChris Wilson /* After this point, we may free va at any time */ 143880c4bd7aSChris Wilson llist_add(&va->purge_list, &vmap_purge_list); 143980c4bd7aSChris Wilson 144080c4bd7aSChris Wilson if (unlikely(nr_lazy > lazy_max_pages())) 1441496850e5SNick Piggin try_purge_vmap_area_lazy(); 1442db64fe02SNick Piggin } 1443db64fe02SNick Piggin 1444b29acbdcSNick Piggin /* 1445b29acbdcSNick Piggin * Free and unmap a vmap area 1446b29acbdcSNick Piggin */ 1447b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va) 1448b29acbdcSNick Piggin { 1449b29acbdcSNick Piggin flush_cache_vunmap(va->va_start, va->va_end); 1450855e57a1SChristoph Hellwig unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start); 14518e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 145282a2e924SChintan Pandya flush_tlb_kernel_range(va->va_start, va->va_end); 145382a2e924SChintan Pandya 1454c8eef01eSChristoph Hellwig free_vmap_area_noflush(va); 1455b29acbdcSNick Piggin } 1456b29acbdcSNick Piggin 1457db64fe02SNick Piggin static struct vmap_area *find_vmap_area(unsigned long addr) 1458db64fe02SNick Piggin { 1459db64fe02SNick Piggin struct vmap_area *va; 1460db64fe02SNick Piggin 1461db64fe02SNick Piggin spin_lock(&vmap_area_lock); 1462db64fe02SNick Piggin va = __find_vmap_area(addr); 1463db64fe02SNick Piggin spin_unlock(&vmap_area_lock); 1464db64fe02SNick Piggin 1465db64fe02SNick Piggin return va; 1466db64fe02SNick Piggin } 1467db64fe02SNick Piggin 1468db64fe02SNick Piggin /*** Per cpu kva allocator ***/ 1469db64fe02SNick Piggin 1470db64fe02SNick Piggin /* 1471db64fe02SNick Piggin * vmap space is limited especially on 32 bit architectures. 
Ensure there is 1472db64fe02SNick Piggin * room for at least 16 percpu vmap blocks per CPU. 1473db64fe02SNick Piggin */ 1474db64fe02SNick Piggin /* 1475db64fe02SNick Piggin * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 1476db64fe02SNick Piggin * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 1477db64fe02SNick Piggin * instead (we just need a rough idea) 1478db64fe02SNick Piggin */ 1479db64fe02SNick Piggin #if BITS_PER_LONG == 32 1480db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024) 1481db64fe02SNick Piggin #else 1482db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024*1024) 1483db64fe02SNick Piggin #endif 1484db64fe02SNick Piggin 1485db64fe02SNick Piggin #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 1486db64fe02SNick Piggin #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 1487db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 1488db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 1489db64fe02SNick Piggin #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 1490db64fe02SNick Piggin #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 1491f982f915SClemens Ladisch #define VMAP_BBMAP_BITS \ 1492f982f915SClemens Ladisch VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 1493db64fe02SNick Piggin VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 1494f982f915SClemens Ladisch VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 1495db64fe02SNick Piggin 1496db64fe02SNick Piggin #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 1497db64fe02SNick Piggin 1498db64fe02SNick Piggin struct vmap_block_queue { 1499db64fe02SNick Piggin spinlock_t lock; 1500db64fe02SNick Piggin struct list_head free; 1501db64fe02SNick Piggin }; 1502db64fe02SNick Piggin 1503db64fe02SNick Piggin struct vmap_block { 1504db64fe02SNick Piggin spinlock_t lock; 1505db64fe02SNick Piggin struct vmap_area *va; 1506db64fe02SNick Piggin unsigned long free, dirty; 15077d61bfe8SRoman Pen unsigned long dirty_min, dirty_max; /*< dirty range */ 1508db64fe02SNick Piggin struct list_head free_list; 1509db64fe02SNick Piggin struct rcu_head rcu_head; 151002b709dfSNick Piggin struct list_head purge; 1511db64fe02SNick Piggin }; 1512db64fe02SNick Piggin 1513db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 1514db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 1515db64fe02SNick Piggin 1516db64fe02SNick Piggin /* 1517*0f14599cSMatthew Wilcox (Oracle) * XArray of vmap blocks, indexed by address, to quickly find a vmap block 1518db64fe02SNick Piggin * in the free path. Could get rid of this if we change the API to return a 1519db64fe02SNick Piggin * "cookie" from alloc, to be passed to free. But no big deal yet. 1520db64fe02SNick Piggin */ 1521*0f14599cSMatthew Wilcox (Oracle) static DEFINE_XARRAY(vmap_blocks); 1522db64fe02SNick Piggin 1523db64fe02SNick Piggin /* 1524db64fe02SNick Piggin * We should probably have a fallback mechanism to allocate virtual memory 1525db64fe02SNick Piggin * out of partially filled vmap blocks. However vmap block sizing should be 1526db64fe02SNick Piggin * fairly reasonable according to the vmalloc size, so it shouldn't be a 1527db64fe02SNick Piggin * big problem. 
1528db64fe02SNick Piggin */ 1529db64fe02SNick Piggin 1530db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr) 1531db64fe02SNick Piggin { 1532db64fe02SNick Piggin addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 1533db64fe02SNick Piggin addr /= VMAP_BLOCK_SIZE; 1534db64fe02SNick Piggin return addr; 1535db64fe02SNick Piggin } 1536db64fe02SNick Piggin 1537cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 1538cf725ce2SRoman Pen { 1539cf725ce2SRoman Pen unsigned long addr; 1540cf725ce2SRoman Pen 1541cf725ce2SRoman Pen addr = va_start + (pages_off << PAGE_SHIFT); 1542cf725ce2SRoman Pen BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 1543cf725ce2SRoman Pen return (void *)addr; 1544cf725ce2SRoman Pen } 1545cf725ce2SRoman Pen 1546cf725ce2SRoman Pen /** 1547cf725ce2SRoman Pen * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 1548cf725ce2SRoman Pen * block. Of course pages number can't exceed VMAP_BBMAP_BITS 1549cf725ce2SRoman Pen * @order: how many 2^order pages should be occupied in newly allocated block 1550cf725ce2SRoman Pen * @gfp_mask: flags for the page level allocator 1551cf725ce2SRoman Pen * 1552a862f68aSMike Rapoport * Return: virtual address in a newly allocated block or ERR_PTR(-errno) 1553cf725ce2SRoman Pen */ 1554cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 1555db64fe02SNick Piggin { 1556db64fe02SNick Piggin struct vmap_block_queue *vbq; 1557db64fe02SNick Piggin struct vmap_block *vb; 1558db64fe02SNick Piggin struct vmap_area *va; 1559db64fe02SNick Piggin unsigned long vb_idx; 1560db64fe02SNick Piggin int node, err; 1561cf725ce2SRoman Pen void *vaddr; 1562db64fe02SNick Piggin 1563db64fe02SNick Piggin node = numa_node_id(); 1564db64fe02SNick Piggin 1565db64fe02SNick Piggin vb = kmalloc_node(sizeof(struct vmap_block), 1566db64fe02SNick Piggin gfp_mask & GFP_RECLAIM_MASK, node); 1567db64fe02SNick Piggin if (unlikely(!vb)) 1568db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 1569db64fe02SNick Piggin 1570db64fe02SNick Piggin va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 1571db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, 1572db64fe02SNick Piggin node, gfp_mask); 1573ddf9c6d4STobias Klauser if (IS_ERR(va)) { 1574db64fe02SNick Piggin kfree(vb); 1575e7d86340SJulia Lawall return ERR_CAST(va); 1576db64fe02SNick Piggin } 1577db64fe02SNick Piggin 1578cf725ce2SRoman Pen vaddr = vmap_block_vaddr(va->va_start, 0); 1579db64fe02SNick Piggin spin_lock_init(&vb->lock); 1580db64fe02SNick Piggin vb->va = va; 1581cf725ce2SRoman Pen /* At least something should be left free */ 1582cf725ce2SRoman Pen BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 1583cf725ce2SRoman Pen vb->free = VMAP_BBMAP_BITS - (1UL << order); 1584db64fe02SNick Piggin vb->dirty = 0; 15857d61bfe8SRoman Pen vb->dirty_min = VMAP_BBMAP_BITS; 15867d61bfe8SRoman Pen vb->dirty_max = 0; 1587db64fe02SNick Piggin INIT_LIST_HEAD(&vb->free_list); 1588db64fe02SNick Piggin 1589db64fe02SNick Piggin vb_idx = addr_to_vb_idx(va->va_start); 1590*0f14599cSMatthew Wilcox (Oracle) err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask); 1591*0f14599cSMatthew Wilcox (Oracle) if (err) { 1592*0f14599cSMatthew Wilcox (Oracle) kfree(vb); 1593*0f14599cSMatthew Wilcox (Oracle) free_vmap_area(va); 1594*0f14599cSMatthew Wilcox (Oracle) return ERR_PTR(err); 1595*0f14599cSMatthew Wilcox (Oracle) } 1596db64fe02SNick Piggin 1597db64fe02SNick Piggin vbq = &get_cpu_var(vmap_block_queue); 1598db64fe02SNick Piggin spin_lock(&vbq->lock); 
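/* Publish the new block on this CPU's free list; vb_alloc() walks this list under RCU when looking for free space. */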
159968ac546fSRoman Pen list_add_tail_rcu(&vb->free_list, &vbq->free); 1600db64fe02SNick Piggin spin_unlock(&vbq->lock); 16013f04ba85STejun Heo put_cpu_var(vmap_block_queue); 1602db64fe02SNick Piggin 1603cf725ce2SRoman Pen return vaddr; 1604db64fe02SNick Piggin } 1605db64fe02SNick Piggin 1606db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb) 1607db64fe02SNick Piggin { 1608db64fe02SNick Piggin struct vmap_block *tmp; 1609db64fe02SNick Piggin 1610*0f14599cSMatthew Wilcox (Oracle) tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start)); 1611db64fe02SNick Piggin BUG_ON(tmp != vb); 1612db64fe02SNick Piggin 161364141da5SJeremy Fitzhardinge free_vmap_area_noflush(vb->va); 161422a3c7d1SLai Jiangshan kfree_rcu(vb, rcu_head); 1615db64fe02SNick Piggin } 1616db64fe02SNick Piggin 161702b709dfSNick Piggin static void purge_fragmented_blocks(int cpu) 161802b709dfSNick Piggin { 161902b709dfSNick Piggin LIST_HEAD(purge); 162002b709dfSNick Piggin struct vmap_block *vb; 162102b709dfSNick Piggin struct vmap_block *n_vb; 162202b709dfSNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 162302b709dfSNick Piggin 162402b709dfSNick Piggin rcu_read_lock(); 162502b709dfSNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 162602b709dfSNick Piggin 162702b709dfSNick Piggin if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) 162802b709dfSNick Piggin continue; 162902b709dfSNick Piggin 163002b709dfSNick Piggin spin_lock(&vb->lock); 163102b709dfSNick Piggin if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { 163202b709dfSNick Piggin vb->free = 0; /* prevent further allocs after releasing lock */ 163302b709dfSNick Piggin vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ 16347d61bfe8SRoman Pen vb->dirty_min = 0; 16357d61bfe8SRoman Pen vb->dirty_max = VMAP_BBMAP_BITS; 163602b709dfSNick Piggin spin_lock(&vbq->lock); 163702b709dfSNick Piggin list_del_rcu(&vb->free_list); 163802b709dfSNick Piggin spin_unlock(&vbq->lock); 163902b709dfSNick Piggin spin_unlock(&vb->lock); 164002b709dfSNick Piggin list_add_tail(&vb->purge, &purge); 164102b709dfSNick Piggin } else 164202b709dfSNick Piggin spin_unlock(&vb->lock); 164302b709dfSNick Piggin } 164402b709dfSNick Piggin rcu_read_unlock(); 164502b709dfSNick Piggin 164602b709dfSNick Piggin list_for_each_entry_safe(vb, n_vb, &purge, purge) { 164702b709dfSNick Piggin list_del(&vb->purge); 164802b709dfSNick Piggin free_vmap_block(vb); 164902b709dfSNick Piggin } 165002b709dfSNick Piggin } 165102b709dfSNick Piggin 165202b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void) 165302b709dfSNick Piggin { 165402b709dfSNick Piggin int cpu; 165502b709dfSNick Piggin 165602b709dfSNick Piggin for_each_possible_cpu(cpu) 165702b709dfSNick Piggin purge_fragmented_blocks(cpu); 165802b709dfSNick Piggin } 165902b709dfSNick Piggin 1660db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 1661db64fe02SNick Piggin { 1662db64fe02SNick Piggin struct vmap_block_queue *vbq; 1663db64fe02SNick Piggin struct vmap_block *vb; 1664cf725ce2SRoman Pen void *vaddr = NULL; 1665db64fe02SNick Piggin unsigned int order; 1666db64fe02SNick Piggin 1667891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 1668db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 1669aa91c4d8SJan Kara if (WARN_ON(size == 0)) { 1670aa91c4d8SJan Kara /* 1671aa91c4d8SJan Kara * Allocating 0 bytes isn't what caller wants since 1672aa91c4d8SJan Kara * get_order(0) returns funny 
result. Just warn and terminate 1673aa91c4d8SJan Kara * early. 1674aa91c4d8SJan Kara */ 1675aa91c4d8SJan Kara return NULL; 1676aa91c4d8SJan Kara } 1677db64fe02SNick Piggin order = get_order(size); 1678db64fe02SNick Piggin 1679db64fe02SNick Piggin rcu_read_lock(); 1680db64fe02SNick Piggin vbq = &get_cpu_var(vmap_block_queue); 1681db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1682cf725ce2SRoman Pen unsigned long pages_off; 1683db64fe02SNick Piggin 1684db64fe02SNick Piggin spin_lock(&vb->lock); 1685cf725ce2SRoman Pen if (vb->free < (1UL << order)) { 1686cf725ce2SRoman Pen spin_unlock(&vb->lock); 1687cf725ce2SRoman Pen continue; 1688cf725ce2SRoman Pen } 168902b709dfSNick Piggin 1690cf725ce2SRoman Pen pages_off = VMAP_BBMAP_BITS - vb->free; 1691cf725ce2SRoman Pen vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 1692db64fe02SNick Piggin vb->free -= 1UL << order; 1693db64fe02SNick Piggin if (vb->free == 0) { 1694db64fe02SNick Piggin spin_lock(&vbq->lock); 1695de560423SNick Piggin list_del_rcu(&vb->free_list); 1696db64fe02SNick Piggin spin_unlock(&vbq->lock); 1697db64fe02SNick Piggin } 1698cf725ce2SRoman Pen 1699db64fe02SNick Piggin spin_unlock(&vb->lock); 1700db64fe02SNick Piggin break; 1701db64fe02SNick Piggin } 170202b709dfSNick Piggin 17033f04ba85STejun Heo put_cpu_var(vmap_block_queue); 1704db64fe02SNick Piggin rcu_read_unlock(); 1705db64fe02SNick Piggin 1706cf725ce2SRoman Pen /* Allocate new block if nothing was found */ 1707cf725ce2SRoman Pen if (!vaddr) 1708cf725ce2SRoman Pen vaddr = new_vmap_block(order, gfp_mask); 1709db64fe02SNick Piggin 1710cf725ce2SRoman Pen return vaddr; 1711db64fe02SNick Piggin } 1712db64fe02SNick Piggin 171378a0e8c4SChristoph Hellwig static void vb_free(unsigned long addr, unsigned long size) 1714db64fe02SNick Piggin { 1715db64fe02SNick Piggin unsigned long offset; 1716db64fe02SNick Piggin unsigned int order; 1717db64fe02SNick Piggin struct vmap_block *vb; 1718db64fe02SNick Piggin 1719891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 1720db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 1721b29acbdcSNick Piggin 172278a0e8c4SChristoph Hellwig flush_cache_vunmap(addr, addr + size); 1723b29acbdcSNick Piggin 1724db64fe02SNick Piggin order = get_order(size); 172578a0e8c4SChristoph Hellwig offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 1726*0f14599cSMatthew Wilcox (Oracle) vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr)); 1727db64fe02SNick Piggin 1728b521c43fSChristoph Hellwig unmap_kernel_range_noflush(addr, size); 172964141da5SJeremy Fitzhardinge 17308e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 173178a0e8c4SChristoph Hellwig flush_tlb_kernel_range(addr, addr + size); 173282a2e924SChintan Pandya 1733db64fe02SNick Piggin spin_lock(&vb->lock); 17347d61bfe8SRoman Pen 17357d61bfe8SRoman Pen /* Expand dirty range */ 17367d61bfe8SRoman Pen vb->dirty_min = min(vb->dirty_min, offset); 17377d61bfe8SRoman Pen vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 1738d086817dSMinChan Kim 1739db64fe02SNick Piggin vb->dirty += 1UL << order; 1740db64fe02SNick Piggin if (vb->dirty == VMAP_BBMAP_BITS) { 1741de560423SNick Piggin BUG_ON(vb->free); 1742db64fe02SNick Piggin spin_unlock(&vb->lock); 1743db64fe02SNick Piggin free_vmap_block(vb); 1744db64fe02SNick Piggin } else 1745db64fe02SNick Piggin spin_unlock(&vb->lock); 1746db64fe02SNick Piggin } 1747db64fe02SNick Piggin 1748868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 1749db64fe02SNick Piggin 
{ 1750db64fe02SNick Piggin int cpu; 1751db64fe02SNick Piggin 17529b463334SJeremy Fitzhardinge if (unlikely(!vmap_initialized)) 17539b463334SJeremy Fitzhardinge return; 17549b463334SJeremy Fitzhardinge 17555803ed29SChristoph Hellwig might_sleep(); 17565803ed29SChristoph Hellwig 1757db64fe02SNick Piggin for_each_possible_cpu(cpu) { 1758db64fe02SNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 1759db64fe02SNick Piggin struct vmap_block *vb; 1760db64fe02SNick Piggin 1761db64fe02SNick Piggin rcu_read_lock(); 1762db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1763db64fe02SNick Piggin spin_lock(&vb->lock); 17647d61bfe8SRoman Pen if (vb->dirty) { 17657d61bfe8SRoman Pen unsigned long va_start = vb->va->va_start; 1766db64fe02SNick Piggin unsigned long s, e; 1767b136be5eSJoonsoo Kim 17687d61bfe8SRoman Pen s = va_start + (vb->dirty_min << PAGE_SHIFT); 17697d61bfe8SRoman Pen e = va_start + (vb->dirty_max << PAGE_SHIFT); 1770db64fe02SNick Piggin 17717d61bfe8SRoman Pen start = min(s, start); 17727d61bfe8SRoman Pen end = max(e, end); 17737d61bfe8SRoman Pen 1774db64fe02SNick Piggin flush = 1; 1775db64fe02SNick Piggin } 1776db64fe02SNick Piggin spin_unlock(&vb->lock); 1777db64fe02SNick Piggin } 1778db64fe02SNick Piggin rcu_read_unlock(); 1779db64fe02SNick Piggin } 1780db64fe02SNick Piggin 1781f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 17820574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 17830574ecd1SChristoph Hellwig if (!__purge_vmap_area_lazy(start, end) && flush) 17840574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 1785f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 1786db64fe02SNick Piggin } 1787868b104dSRick Edgecombe 1788868b104dSRick Edgecombe /** 1789868b104dSRick Edgecombe * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 1790868b104dSRick Edgecombe * 1791868b104dSRick Edgecombe * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 1792868b104dSRick Edgecombe * to amortize TLB flushing overheads. What this means is that any page you 1793868b104dSRick Edgecombe * have now, may, in a former life, have been mapped into kernel virtual 1794868b104dSRick Edgecombe * address by the vmap layer and so there might be some CPUs with TLB entries 1795868b104dSRick Edgecombe * still referencing that page (additional to the regular 1:1 kernel mapping). 1796868b104dSRick Edgecombe * 1797868b104dSRick Edgecombe * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 1798868b104dSRick Edgecombe * be sure that none of the pages we have control over will have any aliases 1799868b104dSRick Edgecombe * from the vmap layer. 
1800868b104dSRick Edgecombe */ 1801868b104dSRick Edgecombe void vm_unmap_aliases(void) 1802868b104dSRick Edgecombe { 1803868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 1804868b104dSRick Edgecombe int flush = 0; 1805868b104dSRick Edgecombe 1806868b104dSRick Edgecombe _vm_unmap_aliases(start, end, flush); 1807868b104dSRick Edgecombe } 1808db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases); 1809db64fe02SNick Piggin 1810db64fe02SNick Piggin /** 1811db64fe02SNick Piggin * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 1812db64fe02SNick Piggin * @mem: the pointer returned by vm_map_ram 1813db64fe02SNick Piggin * @count: the count passed to that vm_map_ram call (cannot unmap partial) 1814db64fe02SNick Piggin */ 1815db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count) 1816db64fe02SNick Piggin { 181765ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 1818db64fe02SNick Piggin unsigned long addr = (unsigned long)mem; 18199c3acf60SChristoph Hellwig struct vmap_area *va; 1820db64fe02SNick Piggin 18215803ed29SChristoph Hellwig might_sleep(); 1822db64fe02SNick Piggin BUG_ON(!addr); 1823db64fe02SNick Piggin BUG_ON(addr < VMALLOC_START); 1824db64fe02SNick Piggin BUG_ON(addr > VMALLOC_END); 1825a1c0b1a0SShawn Lin BUG_ON(!PAGE_ALIGNED(addr)); 1826db64fe02SNick Piggin 1827d98c9e83SAndrey Ryabinin kasan_poison_vmalloc(mem, size); 1828d98c9e83SAndrey Ryabinin 18299c3acf60SChristoph Hellwig if (likely(count <= VMAP_MAX_ALLOC)) { 183005e3ff95SChintan Pandya debug_check_no_locks_freed(mem, size); 183178a0e8c4SChristoph Hellwig vb_free(addr, size); 18329c3acf60SChristoph Hellwig return; 18339c3acf60SChristoph Hellwig } 18349c3acf60SChristoph Hellwig 18359c3acf60SChristoph Hellwig va = find_vmap_area(addr); 18369c3acf60SChristoph Hellwig BUG_ON(!va); 183705e3ff95SChintan Pandya debug_check_no_locks_freed((void *)va->va_start, 183805e3ff95SChintan Pandya (va->va_end - va->va_start)); 18399c3acf60SChristoph Hellwig free_unmap_vmap_area(va); 1840db64fe02SNick Piggin } 1841db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram); 1842db64fe02SNick Piggin 1843db64fe02SNick Piggin /** 1844db64fe02SNick Piggin * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 1845db64fe02SNick Piggin * @pages: an array of pointers to the pages to be mapped 1846db64fe02SNick Piggin * @count: number of pages 1847db64fe02SNick Piggin * @node: prefer to allocate data structures on this node 1848e99c97adSRandy Dunlap * 184936437638SGioh Kim * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 185036437638SGioh Kim * faster than vmap so it's good. But if you mix long-life and short-life 185136437638SGioh Kim * objects with vm_map_ram(), it could consume lots of address space through 185236437638SGioh Kim * fragmentation (especially on a 32bit machine). You could see failures in 185336437638SGioh Kim * the end. Please use this function for short-lived objects. 
185436437638SGioh Kim * 1855e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure 1856db64fe02SNick Piggin */ 1857d4efd79aSChristoph Hellwig void *vm_map_ram(struct page **pages, unsigned int count, int node) 1858db64fe02SNick Piggin { 185965ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 1860db64fe02SNick Piggin unsigned long addr; 1861db64fe02SNick Piggin void *mem; 1862db64fe02SNick Piggin 1863db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) { 1864db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL); 1865db64fe02SNick Piggin if (IS_ERR(mem)) 1866db64fe02SNick Piggin return NULL; 1867db64fe02SNick Piggin addr = (unsigned long)mem; 1868db64fe02SNick Piggin } else { 1869db64fe02SNick Piggin struct vmap_area *va; 1870db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE, 1871db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); 1872db64fe02SNick Piggin if (IS_ERR(va)) 1873db64fe02SNick Piggin return NULL; 1874db64fe02SNick Piggin 1875db64fe02SNick Piggin addr = va->va_start; 1876db64fe02SNick Piggin mem = (void *)addr; 1877db64fe02SNick Piggin } 1878d98c9e83SAndrey Ryabinin 1879d98c9e83SAndrey Ryabinin kasan_unpoison_vmalloc(mem, size); 1880d98c9e83SAndrey Ryabinin 1881d4efd79aSChristoph Hellwig if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) { 1882db64fe02SNick Piggin vm_unmap_ram(mem, count); 1883db64fe02SNick Piggin return NULL; 1884db64fe02SNick Piggin } 1885db64fe02SNick Piggin return mem; 1886db64fe02SNick Piggin } 1887db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram); 1888db64fe02SNick Piggin 18894341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata; 189092eac168SMike Rapoport 1891f0aa6617STejun Heo /** 1892be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot 1893be9b7335SNicolas Pitre * @vm: vm_struct to add 1894be9b7335SNicolas Pitre * 1895be9b7335SNicolas Pitre * This function is used to add fixed kernel vm area to vmlist before 1896be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 1897be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero. 1898be9b7335SNicolas Pitre * 1899be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1900be9b7335SNicolas Pitre */ 1901be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm) 1902be9b7335SNicolas Pitre { 1903be9b7335SNicolas Pitre struct vm_struct *tmp, **p; 1904be9b7335SNicolas Pitre 1905be9b7335SNicolas Pitre BUG_ON(vmap_initialized); 1906be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 1907be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) { 1908be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size); 1909be9b7335SNicolas Pitre break; 1910be9b7335SNicolas Pitre } else 1911be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr); 1912be9b7335SNicolas Pitre } 1913be9b7335SNicolas Pitre vm->next = *p; 1914be9b7335SNicolas Pitre *p = vm; 1915be9b7335SNicolas Pitre } 1916be9b7335SNicolas Pitre 1917be9b7335SNicolas Pitre /** 1918f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot 1919f0aa6617STejun Heo * @vm: vm_struct to register 1920c0c0a293STejun Heo * @align: requested alignment 1921f0aa6617STejun Heo * 1922f0aa6617STejun Heo * This function is used to register kernel vm area before 1923f0aa6617STejun Heo * vmalloc_init() is called. 
@vm->size and @vm->flags should contain 1924f0aa6617STejun Heo * proper values on entry and other fields should be zero. On return, 1925f0aa6617STejun Heo * vm->addr contains the allocated address. 1926f0aa6617STejun Heo * 1927f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1928f0aa6617STejun Heo */ 1929c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align) 1930f0aa6617STejun Heo { 1931f0aa6617STejun Heo static size_t vm_init_off __initdata; 1932c0c0a293STejun Heo unsigned long addr; 1933f0aa6617STejun Heo 1934c0c0a293STejun Heo addr = ALIGN(VMALLOC_START + vm_init_off, align); 1935c0c0a293STejun Heo vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; 1936c0c0a293STejun Heo 1937c0c0a293STejun Heo vm->addr = (void *)addr; 1938f0aa6617STejun Heo 1939be9b7335SNicolas Pitre vm_area_add_early(vm); 1940f0aa6617STejun Heo } 1941f0aa6617STejun Heo 194268ad4a33SUladzislau Rezki (Sony) static void vmap_init_free_space(void) 194368ad4a33SUladzislau Rezki (Sony) { 194468ad4a33SUladzislau Rezki (Sony) unsigned long vmap_start = 1; 194568ad4a33SUladzislau Rezki (Sony) const unsigned long vmap_end = ULONG_MAX; 194668ad4a33SUladzislau Rezki (Sony) struct vmap_area *busy, *free; 194768ad4a33SUladzislau Rezki (Sony) 194868ad4a33SUladzislau Rezki (Sony) /* 194968ad4a33SUladzislau Rezki (Sony) * B F B B B F 195068ad4a33SUladzislau Rezki (Sony) * -|-----|.....|-----|-----|-----|.....|- 195168ad4a33SUladzislau Rezki (Sony) * | The KVA space | 195268ad4a33SUladzislau Rezki (Sony) * |<--------------------------------->| 195368ad4a33SUladzislau Rezki (Sony) */ 195468ad4a33SUladzislau Rezki (Sony) list_for_each_entry(busy, &vmap_area_list, list) { 195568ad4a33SUladzislau Rezki (Sony) if (busy->va_start - vmap_start > 0) { 195668ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 195768ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 195868ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 195968ad4a33SUladzislau Rezki (Sony) free->va_end = busy->va_start; 196068ad4a33SUladzislau Rezki (Sony) 196168ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 196268ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 196368ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 196468ad4a33SUladzislau Rezki (Sony) } 196568ad4a33SUladzislau Rezki (Sony) } 196668ad4a33SUladzislau Rezki (Sony) 196768ad4a33SUladzislau Rezki (Sony) vmap_start = busy->va_end; 196868ad4a33SUladzislau Rezki (Sony) } 196968ad4a33SUladzislau Rezki (Sony) 197068ad4a33SUladzislau Rezki (Sony) if (vmap_end - vmap_start > 0) { 197168ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 197268ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 197368ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 197468ad4a33SUladzislau Rezki (Sony) free->va_end = vmap_end; 197568ad4a33SUladzislau Rezki (Sony) 197668ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 197768ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 197868ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 197968ad4a33SUladzislau Rezki (Sony) } 198068ad4a33SUladzislau Rezki (Sony) } 198168ad4a33SUladzislau Rezki (Sony) } 198268ad4a33SUladzislau Rezki (Sony) 1983db64fe02SNick Piggin void __init vmalloc_init(void) 1984db64fe02SNick Piggin { 1985822c18f2SIvan Kokshaysky struct vmap_area *va; 1986822c18f2SIvan Kokshaysky struct vm_struct *tmp; 1987db64fe02SNick Piggin int i; 1988db64fe02SNick 
Piggin 198968ad4a33SUladzislau Rezki (Sony) /* 199068ad4a33SUladzislau Rezki (Sony) * Create the cache for vmap_area objects. 199168ad4a33SUladzislau Rezki (Sony) */ 199268ad4a33SUladzislau Rezki (Sony) vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 199368ad4a33SUladzislau Rezki (Sony) 1994db64fe02SNick Piggin for_each_possible_cpu(i) { 1995db64fe02SNick Piggin struct vmap_block_queue *vbq; 199632fcfd40SAl Viro struct vfree_deferred *p; 1997db64fe02SNick Piggin 1998db64fe02SNick Piggin vbq = &per_cpu(vmap_block_queue, i); 1999db64fe02SNick Piggin spin_lock_init(&vbq->lock); 2000db64fe02SNick Piggin INIT_LIST_HEAD(&vbq->free); 200132fcfd40SAl Viro p = &per_cpu(vfree_deferred, i); 200232fcfd40SAl Viro init_llist_head(&p->list); 200332fcfd40SAl Viro INIT_WORK(&p->wq, free_work); 2004db64fe02SNick Piggin } 20059b463334SJeremy Fitzhardinge 2006822c18f2SIvan Kokshaysky /* Import existing vmlist entries. */ 2007822c18f2SIvan Kokshaysky for (tmp = vmlist; tmp; tmp = tmp->next) { 200868ad4a33SUladzislau Rezki (Sony) va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 200968ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!va)) 201068ad4a33SUladzislau Rezki (Sony) continue; 201168ad4a33SUladzislau Rezki (Sony) 2012822c18f2SIvan Kokshaysky va->va_start = (unsigned long)tmp->addr; 2013822c18f2SIvan Kokshaysky va->va_end = va->va_start + tmp->size; 2014dbda591dSKyongHo va->vm = tmp; 201568ad4a33SUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 2016822c18f2SIvan Kokshaysky } 2017ca23e405STejun Heo 201868ad4a33SUladzislau Rezki (Sony) /* 201968ad4a33SUladzislau Rezki (Sony) * Now we can initialize a free vmap space. 202068ad4a33SUladzislau Rezki (Sony) */ 202168ad4a33SUladzislau Rezki (Sony) vmap_init_free_space(); 20229b463334SJeremy Fitzhardinge vmap_initialized = true; 2023db64fe02SNick Piggin } 2024db64fe02SNick Piggin 20258fc48985STejun Heo /** 20268fc48985STejun Heo * unmap_kernel_range - unmap kernel VM area and flush cache and TLB 20278fc48985STejun Heo * @addr: start of the VM area to unmap 20288fc48985STejun Heo * @size: size of the VM area to unmap 20298fc48985STejun Heo * 20308fc48985STejun Heo * Similar to unmap_kernel_range_noflush() but flushes vcache before 20318fc48985STejun Heo * the unmapping and tlb after. 
20328fc48985STejun Heo */ 2033db64fe02SNick Piggin void unmap_kernel_range(unsigned long addr, unsigned long size) 2034db64fe02SNick Piggin { 2035db64fe02SNick Piggin unsigned long end = addr + size; 2036f6fcba70STejun Heo 2037f6fcba70STejun Heo flush_cache_vunmap(addr, end); 2038b521c43fSChristoph Hellwig unmap_kernel_range_noflush(addr, size); 2039db64fe02SNick Piggin flush_tlb_kernel_range(addr, end); 2040db64fe02SNick Piggin } 2041db64fe02SNick Piggin 2042e36176beSUladzislau Rezki (Sony) static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, 2043e36176beSUladzislau Rezki (Sony) struct vmap_area *va, unsigned long flags, const void *caller) 2044cf88c790STejun Heo { 2045cf88c790STejun Heo vm->flags = flags; 2046cf88c790STejun Heo vm->addr = (void *)va->va_start; 2047cf88c790STejun Heo vm->size = va->va_end - va->va_start; 2048cf88c790STejun Heo vm->caller = caller; 2049db1aecafSMinchan Kim va->vm = vm; 2050e36176beSUladzislau Rezki (Sony) } 2051e36176beSUladzislau Rezki (Sony) 2052e36176beSUladzislau Rezki (Sony) static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 2053e36176beSUladzislau Rezki (Sony) unsigned long flags, const void *caller) 2054e36176beSUladzislau Rezki (Sony) { 2055e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 2056e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vm, va, flags, caller); 2057c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 2058f5252e00SMitsuo Hayasaka } 2059cf88c790STejun Heo 206020fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm) 2061f5252e00SMitsuo Hayasaka { 2062d4033afdSJoonsoo Kim /* 206320fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED, 2064d4033afdSJoonsoo Kim * we should make sure that vm has proper values. 2065d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info(). 
2066d4033afdSJoonsoo Kim */ 2067d4033afdSJoonsoo Kim smp_wmb(); 206820fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED; 2069cf88c790STejun Heo } 2070cf88c790STejun Heo 2071db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size, 20722dca6999SDavid Miller unsigned long align, unsigned long flags, unsigned long start, 20735e6cafc8SMarek Szyprowski unsigned long end, int node, gfp_t gfp_mask, const void *caller) 2074db64fe02SNick Piggin { 20750006526dSKautuk Consul struct vmap_area *va; 2076db64fe02SNick Piggin struct vm_struct *area; 2077d98c9e83SAndrey Ryabinin unsigned long requested_size = size; 20781da177e4SLinus Torvalds 207952fd24caSGiridhar Pemmasani BUG_ON(in_interrupt()); 20801da177e4SLinus Torvalds size = PAGE_ALIGN(size); 208131be8309SOGAWA Hirofumi if (unlikely(!size)) 208231be8309SOGAWA Hirofumi return NULL; 20831da177e4SLinus Torvalds 2084252e5c6eSzijun_hu if (flags & VM_IOREMAP) 2085252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size), 2086252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER); 2087252e5c6eSzijun_hu 2088cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 20891da177e4SLinus Torvalds if (unlikely(!area)) 20901da177e4SLinus Torvalds return NULL; 20911da177e4SLinus Torvalds 209271394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD)) 20931da177e4SLinus Torvalds size += PAGE_SIZE; 20941da177e4SLinus Torvalds 2095db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask); 2096db64fe02SNick Piggin if (IS_ERR(va)) { 2097db64fe02SNick Piggin kfree(area); 2098db64fe02SNick Piggin return NULL; 20991da177e4SLinus Torvalds } 21001da177e4SLinus Torvalds 2101d98c9e83SAndrey Ryabinin kasan_unpoison_vmalloc((void *)va->va_start, requested_size); 2102f5252e00SMitsuo Hayasaka 2103d98c9e83SAndrey Ryabinin setup_vmalloc_vm(area, va, flags, caller); 21043c5c3cfbSDaniel Axtens 21051da177e4SLinus Torvalds return area; 21061da177e4SLinus Torvalds } 21071da177e4SLinus Torvalds 2108c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 2109c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end, 21105e6cafc8SMarek Szyprowski const void *caller) 2111c2968612SBenjamin Herrenschmidt { 211200ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, 211300ef2d2fSDavid Rientjes GFP_KERNEL, caller); 2114c2968612SBenjamin Herrenschmidt } 2115c2968612SBenjamin Herrenschmidt 21161da177e4SLinus Torvalds /** 2117183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area 21181da177e4SLinus Torvalds * @size: size of the area 21191da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 21201da177e4SLinus Torvalds * 21211da177e4SLinus Torvalds * Search an area of @size in the kernel virtual mapping area, 21221da177e4SLinus Torvalds * and reserve it for our purposes. Returns the area descriptor 21231da177e4SLinus Torvalds * on success or %NULL on failure. 2124a862f68aSMike Rapoport * 2125a862f68aSMike Rapoport * Return: the area descriptor on success or %NULL on failure.
21261da177e4SLinus Torvalds  */
21271da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
21281da177e4SLinus Torvalds {
21292dca6999SDavid Miller 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
213000ef2d2fSDavid Rientjes 				  NUMA_NO_NODE, GFP_KERNEL,
213100ef2d2fSDavid Rientjes 				  __builtin_return_address(0));
213223016969SChristoph Lameter }
213323016969SChristoph Lameter 
213423016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
21355e6cafc8SMarek Szyprowski 				const void *caller)
213623016969SChristoph Lameter {
21372dca6999SDavid Miller 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
213800ef2d2fSDavid Rientjes 				  NUMA_NO_NODE, GFP_KERNEL, caller);
21391da177e4SLinus Torvalds }
21401da177e4SLinus Torvalds 
2141e9da6e99SMarek Szyprowski /**
2142e9da6e99SMarek Szyprowski  * find_vm_area - find a contiguous kernel virtual area
2143e9da6e99SMarek Szyprowski  * @addr: base address
2144e9da6e99SMarek Szyprowski  *
2145e9da6e99SMarek Szyprowski  * Search for the kernel VM area starting at @addr, and return it.
2146e9da6e99SMarek Szyprowski  * It is up to the caller to do all required locking to keep the returned
2147e9da6e99SMarek Szyprowski  * pointer valid.
2148a862f68aSMike Rapoport  *
2149a862f68aSMike Rapoport  * Return: pointer to the found area or %NULL on failure
2150e9da6e99SMarek Szyprowski  */
2151e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr)
215283342314SNick Piggin {
2153db64fe02SNick Piggin 	struct vmap_area *va;
215483342314SNick Piggin 
2155db64fe02SNick Piggin 	va = find_vmap_area((unsigned long)addr);
2156688fcbfcSPengfei Li 	if (!va)
21577856dfebSAndi Kleen 		return NULL;
2158688fcbfcSPengfei Li 
2159688fcbfcSPengfei Li 	return va->vm;
21607856dfebSAndi Kleen }
21617856dfebSAndi Kleen 
21621da177e4SLinus Torvalds /**
2163183ff22bSSimon Arlott  * remove_vm_area - find and remove a contiguous kernel virtual area
21641da177e4SLinus Torvalds  * @addr: base address
21651da177e4SLinus Torvalds  *
21661da177e4SLinus Torvalds  * Search for the kernel VM area starting at @addr, and remove it.
21671da177e4SLinus Torvalds  * This function returns the found VM area, but using it is NOT safe
21687856dfebSAndi Kleen  * on SMP machines, except for its size or flags.
2169a862f68aSMike Rapoport * 2170a862f68aSMike Rapoport * Return: pointer to the found area or %NULL on faulure 21711da177e4SLinus Torvalds */ 2172b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 21731da177e4SLinus Torvalds { 2174db64fe02SNick Piggin struct vmap_area *va; 2175db64fe02SNick Piggin 21765803ed29SChristoph Hellwig might_sleep(); 21775803ed29SChristoph Hellwig 2178dd3b8353SUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 2179dd3b8353SUladzislau Rezki (Sony) va = __find_vmap_area((unsigned long)addr); 2180688fcbfcSPengfei Li if (va && va->vm) { 2181db1aecafSMinchan Kim struct vm_struct *vm = va->vm; 2182f5252e00SMitsuo Hayasaka 2183c69480adSJoonsoo Kim va->vm = NULL; 2184c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 2185c69480adSJoonsoo Kim 2186a5af5aa8SAndrey Ryabinin kasan_free_shadow(vm); 2187dd32c279SKAMEZAWA Hiroyuki free_unmap_vmap_area(va); 2188dd32c279SKAMEZAWA Hiroyuki 2189db64fe02SNick Piggin return vm; 2190db64fe02SNick Piggin } 2191dd3b8353SUladzislau Rezki (Sony) 2192dd3b8353SUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 2193db64fe02SNick Piggin return NULL; 21941da177e4SLinus Torvalds } 21951da177e4SLinus Torvalds 2196868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area, 2197868b104dSRick Edgecombe int (*set_direct_map)(struct page *page)) 2198868b104dSRick Edgecombe { 2199868b104dSRick Edgecombe int i; 2200868b104dSRick Edgecombe 2201868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) 2202868b104dSRick Edgecombe if (page_address(area->pages[i])) 2203868b104dSRick Edgecombe set_direct_map(area->pages[i]); 2204868b104dSRick Edgecombe } 2205868b104dSRick Edgecombe 2206868b104dSRick Edgecombe /* Handle removing and resetting vm mappings related to the vm_struct. */ 2207868b104dSRick Edgecombe static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 2208868b104dSRick Edgecombe { 2209868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 2210868b104dSRick Edgecombe int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 221131e67340SRick Edgecombe int flush_dmap = 0; 2212868b104dSRick Edgecombe int i; 2213868b104dSRick Edgecombe 2214868b104dSRick Edgecombe remove_vm_area(area->addr); 2215868b104dSRick Edgecombe 2216868b104dSRick Edgecombe /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */ 2217868b104dSRick Edgecombe if (!flush_reset) 2218868b104dSRick Edgecombe return; 2219868b104dSRick Edgecombe 2220868b104dSRick Edgecombe /* 2221868b104dSRick Edgecombe * If not deallocating pages, just do the flush of the VM area and 2222868b104dSRick Edgecombe * return. 2223868b104dSRick Edgecombe */ 2224868b104dSRick Edgecombe if (!deallocate_pages) { 2225868b104dSRick Edgecombe vm_unmap_aliases(); 2226868b104dSRick Edgecombe return; 2227868b104dSRick Edgecombe } 2228868b104dSRick Edgecombe 2229868b104dSRick Edgecombe /* 2230868b104dSRick Edgecombe * If execution gets here, flush the vm mapping and reset the direct 2231868b104dSRick Edgecombe * map. Find the start and end range of the direct mappings to make sure 2232868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map. 
2233868b104dSRick Edgecombe */ 2234868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) { 22358e41f872SRick Edgecombe unsigned long addr = (unsigned long)page_address(area->pages[i]); 22368e41f872SRick Edgecombe if (addr) { 2237868b104dSRick Edgecombe start = min(addr, start); 22388e41f872SRick Edgecombe end = max(addr + PAGE_SIZE, end); 223931e67340SRick Edgecombe flush_dmap = 1; 2240868b104dSRick Edgecombe } 2241868b104dSRick Edgecombe } 2242868b104dSRick Edgecombe 2243868b104dSRick Edgecombe /* 2244868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if 2245868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and 2246868b104dSRick Edgecombe * reset the direct map permissions to the default. 2247868b104dSRick Edgecombe */ 2248868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush); 224931e67340SRick Edgecombe _vm_unmap_aliases(start, end, flush_dmap); 2250868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush); 2251868b104dSRick Edgecombe } 2252868b104dSRick Edgecombe 2253b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 22541da177e4SLinus Torvalds { 22551da177e4SLinus Torvalds struct vm_struct *area; 22561da177e4SLinus Torvalds 22571da177e4SLinus Torvalds if (!addr) 22581da177e4SLinus Torvalds return; 22591da177e4SLinus Torvalds 2260e69e9d4aSHATAYAMA Daisuke if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 2261ab15d9b4SDan Carpenter addr)) 22621da177e4SLinus Torvalds return; 22631da177e4SLinus Torvalds 22646ade2032SLiviu Dudau area = find_vm_area(addr); 22651da177e4SLinus Torvalds if (unlikely(!area)) { 22664c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 22671da177e4SLinus Torvalds addr); 22681da177e4SLinus Torvalds return; 22691da177e4SLinus Torvalds } 22701da177e4SLinus Torvalds 227105e3ff95SChintan Pandya debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); 227205e3ff95SChintan Pandya debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); 22739a11b49aSIngo Molnar 22743c5c3cfbSDaniel Axtens kasan_poison_vmalloc(area->addr, area->size); 22753c5c3cfbSDaniel Axtens 2276868b104dSRick Edgecombe vm_remove_mappings(area, deallocate_pages); 2277868b104dSRick Edgecombe 22781da177e4SLinus Torvalds if (deallocate_pages) { 22791da177e4SLinus Torvalds int i; 22801da177e4SLinus Torvalds 22811da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 2282bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 2283bf53d6f8SChristoph Lameter 2284bf53d6f8SChristoph Lameter BUG_ON(!page); 22854949148aSVladimir Davydov __free_pages(page, 0); 22861da177e4SLinus Torvalds } 228797105f0aSRoman Gushchin atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); 22881da177e4SLinus Torvalds 2289244d63eeSDavid Rientjes kvfree(area->pages); 22901da177e4SLinus Torvalds } 22911da177e4SLinus Torvalds 22921da177e4SLinus Torvalds kfree(area); 22931da177e4SLinus Torvalds return; 22941da177e4SLinus Torvalds } 22951da177e4SLinus Torvalds 2296bf22e37aSAndrey Ryabinin static inline void __vfree_deferred(const void *addr) 2297bf22e37aSAndrey Ryabinin { 2298bf22e37aSAndrey Ryabinin /* 2299bf22e37aSAndrey Ryabinin * Use raw_cpu_ptr() because this can be called from preemptible 2300bf22e37aSAndrey Ryabinin * context. 
Preemption is absolutely fine here, because the llist_add()
2301bf22e37aSAndrey Ryabinin 	 * implementation is lockless, so it works even if we are adding to
230273221d88SJeongtae Park 	 * another cpu's list. schedule_work() should be fine with this too.
2303bf22e37aSAndrey Ryabinin 	 */
2304bf22e37aSAndrey Ryabinin 	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2305bf22e37aSAndrey Ryabinin 
2306bf22e37aSAndrey Ryabinin 	if (llist_add((struct llist_node *)addr, &p->list))
2307bf22e37aSAndrey Ryabinin 		schedule_work(&p->wq);
2308bf22e37aSAndrey Ryabinin }
2309bf22e37aSAndrey Ryabinin 
2310bf22e37aSAndrey Ryabinin /**
2311bf22e37aSAndrey Ryabinin  * vfree_atomic - release memory allocated by vmalloc()
2312bf22e37aSAndrey Ryabinin  * @addr: memory base address
2313bf22e37aSAndrey Ryabinin  *
2314bf22e37aSAndrey Ryabinin  * This one is just like vfree() but can be called in any atomic context
2315bf22e37aSAndrey Ryabinin  * except NMIs.
2316bf22e37aSAndrey Ryabinin  */
2317bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr)
2318bf22e37aSAndrey Ryabinin {
2319bf22e37aSAndrey Ryabinin 	BUG_ON(in_nmi());
2320bf22e37aSAndrey Ryabinin 
2321bf22e37aSAndrey Ryabinin 	kmemleak_free(addr);
2322bf22e37aSAndrey Ryabinin 
2323bf22e37aSAndrey Ryabinin 	if (!addr)
2324bf22e37aSAndrey Ryabinin 		return;
2325bf22e37aSAndrey Ryabinin 	__vfree_deferred(addr);
2326bf22e37aSAndrey Ryabinin }
2327bf22e37aSAndrey Ryabinin 
2328c67dc624SRoman Penyaev static void __vfree(const void *addr)
2329c67dc624SRoman Penyaev {
2330c67dc624SRoman Penyaev 	if (unlikely(in_interrupt()))
2331c67dc624SRoman Penyaev 		__vfree_deferred(addr);
2332c67dc624SRoman Penyaev 	else
2333c67dc624SRoman Penyaev 		__vunmap(addr, 1);
2334c67dc624SRoman Penyaev }
2335c67dc624SRoman Penyaev 
23361da177e4SLinus Torvalds /**
23371da177e4SLinus Torvalds  * vfree - release memory allocated by vmalloc()
23381da177e4SLinus Torvalds  * @addr: memory base address
23391da177e4SLinus Torvalds  *
2340183ff22bSSimon Arlott  * Free the virtually contiguous memory area starting at @addr, as
234180e93effSPekka Enberg  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
234280e93effSPekka Enberg  * NULL, no operation is performed.
23431da177e4SLinus Torvalds  *
234432fcfd40SAl Viro  * Must not be called in NMI context (strictly speaking, only if we don't
234532fcfd40SAl Viro  * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
234632fcfd40SAl Viro  * conventions for vfree() arch-dependent would be a really bad idea)
234732fcfd40SAl Viro  *
23483ca4ea3aSAndrey Ryabinin  * May sleep if called *not* from interrupt context.
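 *
 * Example (illustrative sketch; "obj", "buf" and the lock are hypothetical,
 * only vfree() and vfree_atomic() come from this file):
 *
 *	spin_lock(&obj->lock);
 *	buf = obj->buf;
 *	obj->buf = NULL;
 *	spin_unlock(&obj->lock);
 *	vfree(buf);			// back in sleepable context, fine
 *
 *	// if the buffer must be released while still atomic:
 *	spin_lock(&obj->lock);
 *	vfree_atomic(obj->buf);		// defers the real work to a workqueue
 *	obj->buf = NULL;
 *	spin_unlock(&obj->lock);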
23493ca4ea3aSAndrey Ryabinin * 23500e056eb5Smchehab@s-opensource.com * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node) 23511da177e4SLinus Torvalds */ 2352b3bdda02SChristoph Lameter void vfree(const void *addr) 23531da177e4SLinus Torvalds { 235432fcfd40SAl Viro BUG_ON(in_nmi()); 235589219d37SCatalin Marinas 235689219d37SCatalin Marinas kmemleak_free(addr); 235789219d37SCatalin Marinas 2358a8dda165SAndrey Ryabinin might_sleep_if(!in_interrupt()); 2359a8dda165SAndrey Ryabinin 236032fcfd40SAl Viro if (!addr) 236132fcfd40SAl Viro return; 2362c67dc624SRoman Penyaev 2363c67dc624SRoman Penyaev __vfree(addr); 23641da177e4SLinus Torvalds } 23651da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 23661da177e4SLinus Torvalds 23671da177e4SLinus Torvalds /** 23681da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 23691da177e4SLinus Torvalds * @addr: memory base address 23701da177e4SLinus Torvalds * 23711da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 23721da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 23731da177e4SLinus Torvalds * 237480e93effSPekka Enberg * Must not be called in interrupt context. 23751da177e4SLinus Torvalds */ 2376b3bdda02SChristoph Lameter void vunmap(const void *addr) 23771da177e4SLinus Torvalds { 23781da177e4SLinus Torvalds BUG_ON(in_interrupt()); 237934754b69SPeter Zijlstra might_sleep(); 238032fcfd40SAl Viro if (addr) 23811da177e4SLinus Torvalds __vunmap(addr, 0); 23821da177e4SLinus Torvalds } 23831da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 23841da177e4SLinus Torvalds 23851da177e4SLinus Torvalds /** 23861da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 23871da177e4SLinus Torvalds * @pages: array of page pointers 23881da177e4SLinus Torvalds * @count: number of pages to map 23891da177e4SLinus Torvalds * @flags: vm_area->flags 23901da177e4SLinus Torvalds * @prot: page protection for the mapping 23911da177e4SLinus Torvalds * 23921da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 23931da177e4SLinus Torvalds * space. 
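 *
 * Example (illustrative sketch; NR and the error path are hypothetical,
 * the flag and protection values are the usual ones):
 *
 *	struct page *pages[NR];
 *	void *virt;
 *	int i;
 *
 *	for (i = 0; i < NR; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);	// check for NULL in real code
 *	virt = vmap(pages, NR, VM_MAP, PAGE_KERNEL);
 *	if (!virt)
 *		goto err_free_pages;
 *	// ... use the virtually contiguous mapping at virt ...
 *	vunmap(virt);		// does not free the pages themselves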
2394a862f68aSMike Rapoport * 2395a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 23961da177e4SLinus Torvalds */ 23971da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 23981da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 23991da177e4SLinus Torvalds { 24001da177e4SLinus Torvalds struct vm_struct *area; 240165ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */ 24021da177e4SLinus Torvalds 240334754b69SPeter Zijlstra might_sleep(); 240434754b69SPeter Zijlstra 2405ca79b0c2SArun KS if (count > totalram_pages()) 24061da177e4SLinus Torvalds return NULL; 24071da177e4SLinus Torvalds 240865ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT; 240965ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 24101da177e4SLinus Torvalds if (!area) 24111da177e4SLinus Torvalds return NULL; 241223016969SChristoph Lameter 2413cca98e9fSChristoph Hellwig if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot), 2414ed1f324cSChristoph Hellwig pages) < 0) { 24151da177e4SLinus Torvalds vunmap(area->addr); 24161da177e4SLinus Torvalds return NULL; 24171da177e4SLinus Torvalds } 24181da177e4SLinus Torvalds 24191da177e4SLinus Torvalds return area->addr; 24201da177e4SLinus Torvalds } 24211da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 24221da177e4SLinus Torvalds 2423e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 24243722e13cSWanpeng Li pgprot_t prot, int node) 24251da177e4SLinus Torvalds { 24261da177e4SLinus Torvalds struct page **pages; 24271da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 2428930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 2429704b862fSLaura Abbott const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; 2430704b862fSLaura Abbott const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ? 2431704b862fSLaura Abbott 0 : 2432704b862fSLaura Abbott __GFP_HIGHMEM; 24331da177e4SLinus Torvalds 2434762216abSWanpeng Li nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; 24351da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 24361da177e4SLinus Torvalds 24371da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
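 * The only possible recursion is the allocation of the struct page *
 * array itself: each nested level is smaller by a factor of roughly
 * PAGE_SIZE / sizeof(struct page *), so array_size quickly drops to a
 * page or less and the kmalloc_node() branch below ends the recursion.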
*/ 24388757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 2439704b862fSLaura Abbott pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask, 2440f38fcb9cSChristoph Hellwig node, area->caller); 2441286e1ea3SAndrew Morton } else { 2442976d6dfbSJan Beulich pages = kmalloc_node(array_size, nested_gfp, node); 2443286e1ea3SAndrew Morton } 24447ea36242SAustin Kim 24457ea36242SAustin Kim if (!pages) { 24461da177e4SLinus Torvalds remove_vm_area(area->addr); 24471da177e4SLinus Torvalds kfree(area); 24481da177e4SLinus Torvalds return NULL; 24491da177e4SLinus Torvalds } 24501da177e4SLinus Torvalds 24517ea36242SAustin Kim area->pages = pages; 24527ea36242SAustin Kim area->nr_pages = nr_pages; 24537ea36242SAustin Kim 24541da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 2455bf53d6f8SChristoph Lameter struct page *page; 2456bf53d6f8SChristoph Lameter 24574b90951cSJianguo Wu if (node == NUMA_NO_NODE) 2458704b862fSLaura Abbott page = alloc_page(alloc_mask|highmem_mask); 2459930fc45aSChristoph Lameter else 2460704b862fSLaura Abbott page = alloc_pages_node(node, alloc_mask|highmem_mask, 0); 2461bf53d6f8SChristoph Lameter 2462bf53d6f8SChristoph Lameter if (unlikely(!page)) { 24631da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 24641da177e4SLinus Torvalds area->nr_pages = i; 246597105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 24661da177e4SLinus Torvalds goto fail; 24671da177e4SLinus Torvalds } 2468bf53d6f8SChristoph Lameter area->pages[i] = page; 2469dcf61ff0SLiu Xiang if (gfpflags_allow_blocking(gfp_mask)) 2470660654f9SEric Dumazet cond_resched(); 24711da177e4SLinus Torvalds } 247297105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 24731da177e4SLinus Torvalds 2474ed1f324cSChristoph Hellwig if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), 2475ed1f324cSChristoph Hellwig prot, pages) < 0) 24761da177e4SLinus Torvalds goto fail; 2477ed1f324cSChristoph Hellwig 24781da177e4SLinus Torvalds return area->addr; 24791da177e4SLinus Torvalds 24801da177e4SLinus Torvalds fail: 2481a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 24827877cdccSMichal Hocko "vmalloc: allocation failure, allocated %ld of %ld bytes", 248322943ab1SDave Hansen (area->nr_pages*PAGE_SIZE), area->size); 2484c67dc624SRoman Penyaev __vfree(area->addr); 24851da177e4SLinus Torvalds return NULL; 24861da177e4SLinus Torvalds } 24871da177e4SLinus Torvalds 2488d0a21265SDavid Rientjes /** 2489d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 2490d0a21265SDavid Rientjes * @size: allocation size 2491d0a21265SDavid Rientjes * @align: desired alignment 2492d0a21265SDavid Rientjes * @start: vm area range start 2493d0a21265SDavid Rientjes * @end: vm area range end 2494d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 2495d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 2496cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 249700ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 2498d0a21265SDavid Rientjes * @caller: caller's return address 2499d0a21265SDavid Rientjes * 2500d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 2501d0a21265SDavid Rientjes * allocator with @gfp_mask flags. Map them into contiguous 2502d0a21265SDavid Rientjes * kernel virtual space, using a pagetable protection of @prot. 
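 *
 * Example (illustrative sketch): vmalloc_user() later in this file is
 * essentially one such call, covering the whole vmalloc range, asking for
 * zeroed pages and marking the area %VM_USERMAP:
 *
 *	p = __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
 *				 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
 *				 VM_USERMAP, NUMA_NO_NODE,
 *				 __builtin_return_address(0));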
2503a862f68aSMike Rapoport * 2504a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 2505d0a21265SDavid Rientjes */ 2506d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 2507d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 2508cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node, 2509cb9e3c29SAndrey Ryabinin const void *caller) 2510930fc45aSChristoph Lameter { 2511d0a21265SDavid Rientjes struct vm_struct *area; 2512d0a21265SDavid Rientjes void *addr; 2513d0a21265SDavid Rientjes unsigned long real_size = size; 2514d0a21265SDavid Rientjes 2515d0a21265SDavid Rientjes size = PAGE_ALIGN(size); 2516ca79b0c2SArun KS if (!size || (size >> PAGE_SHIFT) > totalram_pages()) 2517de7d2b56SJoe Perches goto fail; 2518d0a21265SDavid Rientjes 2519d98c9e83SAndrey Ryabinin area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED | 2520cb9e3c29SAndrey Ryabinin vm_flags, start, end, node, gfp_mask, caller); 2521d0a21265SDavid Rientjes if (!area) 2522de7d2b56SJoe Perches goto fail; 2523d0a21265SDavid Rientjes 25243722e13cSWanpeng Li addr = __vmalloc_area_node(area, gfp_mask, prot, node); 25251368edf0SMel Gorman if (!addr) 2526b82225f3SWanpeng Li return NULL; 252789219d37SCatalin Marinas 252889219d37SCatalin Marinas /* 252920fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED 253020fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized. 25314341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here. 2532f5252e00SMitsuo Hayasaka */ 253320fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area); 2534f5252e00SMitsuo Hayasaka 253594f4a161SCatalin Marinas kmemleak_vmalloc(area, size, gfp_mask); 253689219d37SCatalin Marinas 253789219d37SCatalin Marinas return addr; 2538de7d2b56SJoe Perches 2539de7d2b56SJoe Perches fail: 2540a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 25417877cdccSMichal Hocko "vmalloc: allocation failure: %lu bytes", real_size); 2542de7d2b56SJoe Perches return NULL; 2543930fc45aSChristoph Lameter } 2544930fc45aSChristoph Lameter 25451da177e4SLinus Torvalds /** 2546930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 25471da177e4SLinus Torvalds * @size: allocation size 25482dca6999SDavid Miller * @align: desired alignment 25491da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 255000ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 2551c85d194bSRandy Dunlap * @caller: caller's return address 25521da177e4SLinus Torvalds * 2553f38fcb9cSChristoph Hellwig * Allocate enough pages to cover @size from the page level allocator with 2554f38fcb9cSChristoph Hellwig * @gfp_mask flags. Map them into contiguous kernel virtual space. 2555a7c3e901SMichal Hocko * 2556dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 2557a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported 2558a7c3e901SMichal Hocko * 2559a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be consulted 2560a7c3e901SMichal Hocko * with mm people. 
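 *
 * Example (illustrative sketch): the convenience wrappers below are thin
 * layers over this function; a node-local, zeroed allocation is just
 *
 *	p = __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, nid,
 *			   __builtin_return_address(0));
 *
 * which is what vzalloc_node() expands to.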
2561a862f68aSMike Rapoport * 2562a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 25631da177e4SLinus Torvalds */ 25642b905948SChristoph Hellwig void *__vmalloc_node(unsigned long size, unsigned long align, 2565f38fcb9cSChristoph Hellwig gfp_t gfp_mask, int node, const void *caller) 25661da177e4SLinus Torvalds { 2567d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 2568f38fcb9cSChristoph Hellwig gfp_mask, PAGE_KERNEL, 0, node, caller); 25691da177e4SLinus Torvalds } 2570c3f896dcSChristoph Hellwig /* 2571c3f896dcSChristoph Hellwig * This is only for performance analysis of vmalloc and stress purpose. 2572c3f896dcSChristoph Hellwig * It is required by vmalloc test module, therefore do not use it other 2573c3f896dcSChristoph Hellwig * than that. 2574c3f896dcSChristoph Hellwig */ 2575c3f896dcSChristoph Hellwig #ifdef CONFIG_TEST_VMALLOC_MODULE 2576c3f896dcSChristoph Hellwig EXPORT_SYMBOL_GPL(__vmalloc_node); 2577c3f896dcSChristoph Hellwig #endif 25781da177e4SLinus Torvalds 257988dca4caSChristoph Hellwig void *__vmalloc(unsigned long size, gfp_t gfp_mask) 2580930fc45aSChristoph Lameter { 2581f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, 258223016969SChristoph Lameter __builtin_return_address(0)); 2583930fc45aSChristoph Lameter } 25841da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 25851da177e4SLinus Torvalds 25861da177e4SLinus Torvalds /** 25871da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 25881da177e4SLinus Torvalds * @size: allocation size 258992eac168SMike Rapoport * 25901da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 25911da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 25921da177e4SLinus Torvalds * 2593c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 25941da177e4SLinus Torvalds * use __vmalloc() instead. 2595a862f68aSMike Rapoport * 2596a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 25971da177e4SLinus Torvalds */ 25981da177e4SLinus Torvalds void *vmalloc(unsigned long size) 25991da177e4SLinus Torvalds { 26004d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE, 26014d39d728SChristoph Hellwig __builtin_return_address(0)); 26021da177e4SLinus Torvalds } 26031da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 26041da177e4SLinus Torvalds 2605930fc45aSChristoph Lameter /** 2606e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 2607e1ca7788SDave Young * @size: allocation size 260892eac168SMike Rapoport * 2609e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 2610e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 2611e1ca7788SDave Young * The memory allocated is set to zero. 2612e1ca7788SDave Young * 2613e1ca7788SDave Young * For tight control over page level allocator and protection flags 2614e1ca7788SDave Young * use __vmalloc() instead. 
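 *
 * Example (illustrative sketch; "table" and "nr_entries" are hypothetical,
 * array_size() comes from <linux/overflow.h>):
 *
 *	struct entry *table;
 *
 *	table = vzalloc(array_size(nr_entries, sizeof(*table)));
 *	if (!table)
 *		return -ENOMEM;
 *	// every entry starts out zero-filled
 *	vfree(table);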
2615a862f68aSMike Rapoport * 2616a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2617e1ca7788SDave Young */ 2618e1ca7788SDave Young void *vzalloc(unsigned long size) 2619e1ca7788SDave Young { 26204d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 26214d39d728SChristoph Hellwig __builtin_return_address(0)); 2622e1ca7788SDave Young } 2623e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 2624e1ca7788SDave Young 2625e1ca7788SDave Young /** 2626ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 262783342314SNick Piggin * @size: allocation size 2628ead04089SRolf Eike Beer * 2629ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 2630ead04089SRolf Eike Beer * without leaking data. 2631a862f68aSMike Rapoport * 2632a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 263383342314SNick Piggin */ 263483342314SNick Piggin void *vmalloc_user(unsigned long size) 263583342314SNick Piggin { 2636bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 2637bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 2638bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 263900ef2d2fSDavid Rientjes __builtin_return_address(0)); 264083342314SNick Piggin } 264183342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 264283342314SNick Piggin 264383342314SNick Piggin /** 2644930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 2645930fc45aSChristoph Lameter * @size: allocation size 2646d44e0780SRandy Dunlap * @node: numa node 2647930fc45aSChristoph Lameter * 2648930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 2649930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 2650930fc45aSChristoph Lameter * 2651c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 2652930fc45aSChristoph Lameter * use __vmalloc() instead. 2653a862f68aSMike Rapoport * 2654a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2655930fc45aSChristoph Lameter */ 2656930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 2657930fc45aSChristoph Lameter { 2658f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, node, 2659f38fcb9cSChristoph Hellwig __builtin_return_address(0)); 2660930fc45aSChristoph Lameter } 2661930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 2662930fc45aSChristoph Lameter 2663e1ca7788SDave Young /** 2664e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 2665e1ca7788SDave Young * @size: allocation size 2666e1ca7788SDave Young * @node: numa node 2667e1ca7788SDave Young * 2668e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 2669e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 2670e1ca7788SDave Young * The memory allocated is set to zero. 
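 *
 * Example (illustrative sketch; "stats" and STATS_SIZE are hypothetical):
 *
 *	int nid;
 *
 *	for_each_online_node(nid) {
 *		stats[nid] = vzalloc_node(STATS_SIZE, nid);
 *		if (!stats[nid])
 *			goto err_unwind;	// free the ones already allocated
 *	}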
2671e1ca7788SDave Young * 2672a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2673e1ca7788SDave Young */ 2674e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 2675e1ca7788SDave Young { 26764d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node, 26774d39d728SChristoph Hellwig __builtin_return_address(0)); 2678e1ca7788SDave Young } 2679e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 2680e1ca7788SDave Young 26810d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 2682698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 26830d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 2684698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 26850d08e0d3SAndi Kleen #else 2686698d0831SMichal Hocko /* 2687698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others 2688698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone. 2689698d0831SMichal Hocko */ 2690698d0831SMichal Hocko #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL 26910d08e0d3SAndi Kleen #endif 26920d08e0d3SAndi Kleen 26931da177e4SLinus Torvalds /** 26941da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 26951da177e4SLinus Torvalds * @size: allocation size 26961da177e4SLinus Torvalds * 26971da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 26981da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 2699a862f68aSMike Rapoport * 2700a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 27011da177e4SLinus Torvalds */ 27021da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 27031da177e4SLinus Torvalds { 2704f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, 2705f38fcb9cSChristoph Hellwig __builtin_return_address(0)); 27061da177e4SLinus Torvalds } 27071da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 27081da177e4SLinus Torvalds 270983342314SNick Piggin /** 2710ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 271183342314SNick Piggin * @size: allocation size 2712ead04089SRolf Eike Beer * 2713ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 2714ead04089SRolf Eike Beer * mapped to userspace without leaking data. 2715a862f68aSMike Rapoport * 2716a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 271783342314SNick Piggin */ 271883342314SNick Piggin void *vmalloc_32_user(unsigned long size) 271983342314SNick Piggin { 2720bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 2721bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 2722bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 27235a82ac71SRoman Penyaev __builtin_return_address(0)); 272483342314SNick Piggin } 272583342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 272683342314SNick Piggin 2727d0107eb0SKAMEZAWA Hiroyuki /* 2728d0107eb0SKAMEZAWA Hiroyuki * small helper routine , copy contents to buf from addr. 2729d0107eb0SKAMEZAWA Hiroyuki * If the page is not present, fill zero. 
2730d0107eb0SKAMEZAWA Hiroyuki */ 2731d0107eb0SKAMEZAWA Hiroyuki 2732d0107eb0SKAMEZAWA Hiroyuki static int aligned_vread(char *buf, char *addr, unsigned long count) 2733d0107eb0SKAMEZAWA Hiroyuki { 2734d0107eb0SKAMEZAWA Hiroyuki struct page *p; 2735d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 2736d0107eb0SKAMEZAWA Hiroyuki 2737d0107eb0SKAMEZAWA Hiroyuki while (count) { 2738d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 2739d0107eb0SKAMEZAWA Hiroyuki 2740891c49abSAlexander Kuleshov offset = offset_in_page(addr); 2741d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 2742d0107eb0SKAMEZAWA Hiroyuki if (length > count) 2743d0107eb0SKAMEZAWA Hiroyuki length = count; 2744d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 2745d0107eb0SKAMEZAWA Hiroyuki /* 2746d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need 2747d0107eb0SKAMEZAWA Hiroyuki * lock. But adding lock here means that we need to add 2748d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calles for this _debug_ 2749d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 2750d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function. 2751d0107eb0SKAMEZAWA Hiroyuki */ 2752d0107eb0SKAMEZAWA Hiroyuki if (p) { 2753d0107eb0SKAMEZAWA Hiroyuki /* 2754d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's 2755d0107eb0SKAMEZAWA Hiroyuki * function description) 2756d0107eb0SKAMEZAWA Hiroyuki */ 27579b04c5feSCong Wang void *map = kmap_atomic(p); 2758d0107eb0SKAMEZAWA Hiroyuki memcpy(buf, map + offset, length); 27599b04c5feSCong Wang kunmap_atomic(map); 2760d0107eb0SKAMEZAWA Hiroyuki } else 2761d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, length); 2762d0107eb0SKAMEZAWA Hiroyuki 2763d0107eb0SKAMEZAWA Hiroyuki addr += length; 2764d0107eb0SKAMEZAWA Hiroyuki buf += length; 2765d0107eb0SKAMEZAWA Hiroyuki copied += length; 2766d0107eb0SKAMEZAWA Hiroyuki count -= length; 2767d0107eb0SKAMEZAWA Hiroyuki } 2768d0107eb0SKAMEZAWA Hiroyuki return copied; 2769d0107eb0SKAMEZAWA Hiroyuki } 2770d0107eb0SKAMEZAWA Hiroyuki 2771d0107eb0SKAMEZAWA Hiroyuki static int aligned_vwrite(char *buf, char *addr, unsigned long count) 2772d0107eb0SKAMEZAWA Hiroyuki { 2773d0107eb0SKAMEZAWA Hiroyuki struct page *p; 2774d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 2775d0107eb0SKAMEZAWA Hiroyuki 2776d0107eb0SKAMEZAWA Hiroyuki while (count) { 2777d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 2778d0107eb0SKAMEZAWA Hiroyuki 2779891c49abSAlexander Kuleshov offset = offset_in_page(addr); 2780d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 2781d0107eb0SKAMEZAWA Hiroyuki if (length > count) 2782d0107eb0SKAMEZAWA Hiroyuki length = count; 2783d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 2784d0107eb0SKAMEZAWA Hiroyuki /* 2785d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need 2786d0107eb0SKAMEZAWA Hiroyuki * lock. But adding lock here means that we need to add 2787d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calles for this _debug_ 2788d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 2789d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function. 
2790d0107eb0SKAMEZAWA Hiroyuki 	 */
2791d0107eb0SKAMEZAWA Hiroyuki 	if (p) {
2792d0107eb0SKAMEZAWA Hiroyuki 		/*
2793d0107eb0SKAMEZAWA Hiroyuki 		 * we can expect USER0 is not used (see vread/vwrite's
2794d0107eb0SKAMEZAWA Hiroyuki 		 * function description)
2795d0107eb0SKAMEZAWA Hiroyuki 		 */
27969b04c5feSCong Wang 		void *map = kmap_atomic(p);
2797d0107eb0SKAMEZAWA Hiroyuki 		memcpy(map + offset, buf, length);
27989b04c5feSCong Wang 		kunmap_atomic(map);
2799d0107eb0SKAMEZAWA Hiroyuki 	}
2800d0107eb0SKAMEZAWA Hiroyuki 	addr += length;
2801d0107eb0SKAMEZAWA Hiroyuki 	buf += length;
2802d0107eb0SKAMEZAWA Hiroyuki 	copied += length;
2803d0107eb0SKAMEZAWA Hiroyuki 	count -= length;
2804d0107eb0SKAMEZAWA Hiroyuki 	}
2805d0107eb0SKAMEZAWA Hiroyuki 	return copied;
2806d0107eb0SKAMEZAWA Hiroyuki }
2807d0107eb0SKAMEZAWA Hiroyuki 
2808d0107eb0SKAMEZAWA Hiroyuki /**
2809d0107eb0SKAMEZAWA Hiroyuki  * vread() - read vmalloc area in a safe way.
2810d0107eb0SKAMEZAWA Hiroyuki  * @buf: buffer for reading data
2811d0107eb0SKAMEZAWA Hiroyuki  * @addr: vm address.
2812d0107eb0SKAMEZAWA Hiroyuki  * @count: number of bytes to be read.
2813d0107eb0SKAMEZAWA Hiroyuki  *
2814d0107eb0SKAMEZAWA Hiroyuki  * This function checks that addr is a valid vmalloc'ed area, and
2815d0107eb0SKAMEZAWA Hiroyuki  * copies data from that area to a given buffer. If the given memory range
2816d0107eb0SKAMEZAWA Hiroyuki  * of [addr...addr+count) includes some valid address, data is copied to
2817d0107eb0SKAMEZAWA Hiroyuki  * the corresponding area of @buf. If there are memory holes, they'll be zero-filled.
2818d0107eb0SKAMEZAWA Hiroyuki  * IOREMAP area is treated as memory hole and no copy is done.
2819d0107eb0SKAMEZAWA Hiroyuki  *
2820d0107eb0SKAMEZAWA Hiroyuki  * If [addr...addr+count) doesn't include any intersection with a live
2821a8e5202dSCong Wang  * vm_struct area, this returns 0. @buf should be a kernel buffer.
2822d0107eb0SKAMEZAWA Hiroyuki  *
2823d0107eb0SKAMEZAWA Hiroyuki  * Note: In usual ops, vread() is never necessary because the caller
2824d0107eb0SKAMEZAWA Hiroyuki  * should know the vmalloc() area is valid and can use memcpy().
2825d0107eb0SKAMEZAWA Hiroyuki  * This is for routines which have to access vmalloc area without
2826d9009d67SGeert Uytterhoeven  * any information, as /dev/kmem.
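 *
 * Example (illustrative sketch; "kbuf" is a kernel buffer of at least
 * "len" bytes, both names are hypothetical):
 *
 *	long copied;
 *
 *	copied = vread(kbuf, (char *)vaddr, len);
 *	if (!copied)
 *		memset(kbuf, 0, len);	// nothing there was a live vmalloc area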
2827a862f68aSMike Rapoport * 2828a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be increased 2829a862f68aSMike Rapoport * (same number as @count) or %0 if [addr...addr+count) doesn't 2830a862f68aSMike Rapoport * include any intersection with valid vmalloc area 2831d0107eb0SKAMEZAWA Hiroyuki */ 28321da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count) 28331da177e4SLinus Torvalds { 2834e81ce85fSJoonsoo Kim struct vmap_area *va; 2835e81ce85fSJoonsoo Kim struct vm_struct *vm; 28361da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 2837d0107eb0SKAMEZAWA Hiroyuki unsigned long buflen = count; 28381da177e4SLinus Torvalds unsigned long n; 28391da177e4SLinus Torvalds 28401da177e4SLinus Torvalds /* Don't allow overflow */ 28411da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 28421da177e4SLinus Torvalds count = -(unsigned long) addr; 28431da177e4SLinus Torvalds 2844e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock); 2845e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) { 2846e81ce85fSJoonsoo Kim if (!count) 2847e81ce85fSJoonsoo Kim break; 2848e81ce85fSJoonsoo Kim 2849688fcbfcSPengfei Li if (!va->vm) 2850e81ce85fSJoonsoo Kim continue; 2851e81ce85fSJoonsoo Kim 2852e81ce85fSJoonsoo Kim vm = va->vm; 2853e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr; 2854762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm)) 28551da177e4SLinus Torvalds continue; 28561da177e4SLinus Torvalds while (addr < vaddr) { 28571da177e4SLinus Torvalds if (count == 0) 28581da177e4SLinus Torvalds goto finished; 28591da177e4SLinus Torvalds *buf = '\0'; 28601da177e4SLinus Torvalds buf++; 28611da177e4SLinus Torvalds addr++; 28621da177e4SLinus Torvalds count--; 28631da177e4SLinus Torvalds } 2864762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr; 2865d0107eb0SKAMEZAWA Hiroyuki if (n > count) 2866d0107eb0SKAMEZAWA Hiroyuki n = count; 2867e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) 2868d0107eb0SKAMEZAWA Hiroyuki aligned_vread(buf, addr, n); 2869d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */ 2870d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, n); 2871d0107eb0SKAMEZAWA Hiroyuki buf += n; 2872d0107eb0SKAMEZAWA Hiroyuki addr += n; 2873d0107eb0SKAMEZAWA Hiroyuki count -= n; 28741da177e4SLinus Torvalds } 28751da177e4SLinus Torvalds finished: 2876e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 2877d0107eb0SKAMEZAWA Hiroyuki 2878d0107eb0SKAMEZAWA Hiroyuki if (buf == buf_start) 2879d0107eb0SKAMEZAWA Hiroyuki return 0; 2880d0107eb0SKAMEZAWA Hiroyuki /* zero-fill memory holes */ 2881d0107eb0SKAMEZAWA Hiroyuki if (buf != buf_start + buflen) 2882d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, buflen - (buf - buf_start)); 2883d0107eb0SKAMEZAWA Hiroyuki 2884d0107eb0SKAMEZAWA Hiroyuki return buflen; 28851da177e4SLinus Torvalds } 28861da177e4SLinus Torvalds 2887d0107eb0SKAMEZAWA Hiroyuki /** 2888d0107eb0SKAMEZAWA Hiroyuki * vwrite() - write vmalloc area in a safe way. 2889d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for source data 2890d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 2891d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read. 2892d0107eb0SKAMEZAWA Hiroyuki * 2893d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 2894d0107eb0SKAMEZAWA Hiroyuki * copy data from a buffer to the given addr. If specified range of 2895d0107eb0SKAMEZAWA Hiroyuki * [addr...addr+count) includes some valid address, data is copied from 2896d0107eb0SKAMEZAWA Hiroyuki * proper area of @buf. 
If there are memory holes, no copy to hole. 2897d0107eb0SKAMEZAWA Hiroyuki * IOREMAP area is treated as memory hole and no copy is done. 2898d0107eb0SKAMEZAWA Hiroyuki * 2899d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't includes any intersects with alive 2900a8e5202dSCong Wang * vm_struct area, returns 0. @buf should be kernel's buffer. 2901d0107eb0SKAMEZAWA Hiroyuki * 2902d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vwrite() is never necessary because the caller 2903d0107eb0SKAMEZAWA Hiroyuki * should know vmalloc() area is valid and can use memcpy(). 2904d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access vmalloc area without 2905d9009d67SGeert Uytterhoeven * any information, as /dev/kmem. 2906a862f68aSMike Rapoport * 2907a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be 2908a862f68aSMike Rapoport * increased (same number as @count) or %0 if [addr...addr+count) 2909a862f68aSMike Rapoport * doesn't include any intersection with valid vmalloc area 2910d0107eb0SKAMEZAWA Hiroyuki */ 29111da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count) 29121da177e4SLinus Torvalds { 2913e81ce85fSJoonsoo Kim struct vmap_area *va; 2914e81ce85fSJoonsoo Kim struct vm_struct *vm; 2915d0107eb0SKAMEZAWA Hiroyuki char *vaddr; 2916d0107eb0SKAMEZAWA Hiroyuki unsigned long n, buflen; 2917d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 29181da177e4SLinus Torvalds 29191da177e4SLinus Torvalds /* Don't allow overflow */ 29201da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 29211da177e4SLinus Torvalds count = -(unsigned long) addr; 2922d0107eb0SKAMEZAWA Hiroyuki buflen = count; 29231da177e4SLinus Torvalds 2924e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock); 2925e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) { 2926e81ce85fSJoonsoo Kim if (!count) 2927e81ce85fSJoonsoo Kim break; 2928e81ce85fSJoonsoo Kim 2929688fcbfcSPengfei Li if (!va->vm) 2930e81ce85fSJoonsoo Kim continue; 2931e81ce85fSJoonsoo Kim 2932e81ce85fSJoonsoo Kim vm = va->vm; 2933e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr; 2934762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm)) 29351da177e4SLinus Torvalds continue; 29361da177e4SLinus Torvalds while (addr < vaddr) { 29371da177e4SLinus Torvalds if (count == 0) 29381da177e4SLinus Torvalds goto finished; 29391da177e4SLinus Torvalds buf++; 29401da177e4SLinus Torvalds addr++; 29411da177e4SLinus Torvalds count--; 29421da177e4SLinus Torvalds } 2943762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr; 2944d0107eb0SKAMEZAWA Hiroyuki if (n > count) 2945d0107eb0SKAMEZAWA Hiroyuki n = count; 2946e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) { 2947d0107eb0SKAMEZAWA Hiroyuki aligned_vwrite(buf, addr, n); 2948d0107eb0SKAMEZAWA Hiroyuki copied++; 2949d0107eb0SKAMEZAWA Hiroyuki } 2950d0107eb0SKAMEZAWA Hiroyuki buf += n; 2951d0107eb0SKAMEZAWA Hiroyuki addr += n; 2952d0107eb0SKAMEZAWA Hiroyuki count -= n; 29531da177e4SLinus Torvalds } 29541da177e4SLinus Torvalds finished: 2955e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 2956d0107eb0SKAMEZAWA Hiroyuki if (!copied) 2957d0107eb0SKAMEZAWA Hiroyuki return 0; 2958d0107eb0SKAMEZAWA Hiroyuki return buflen; 29591da177e4SLinus Torvalds } 296083342314SNick Piggin 296183342314SNick Piggin /** 2962e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace 2963e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover 2964e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at 2965e69e9d4aSHATAYAMA Daisuke * 
@kaddr: virtual address of vmalloc kernel memory 2966bdebd6a2SJann Horn * @pgoff: offset from @kaddr to start at 2967e69e9d4aSHATAYAMA Daisuke * @size: size of map area 2968e69e9d4aSHATAYAMA Daisuke * 2969e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure 2970e69e9d4aSHATAYAMA Daisuke * 2971e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area, 2972e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at 2973e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if that criteria isn't 2974e69e9d4aSHATAYAMA Daisuke * met. 2975e69e9d4aSHATAYAMA Daisuke * 2976e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c) 2977e69e9d4aSHATAYAMA Daisuke */ 2978e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 2979bdebd6a2SJann Horn void *kaddr, unsigned long pgoff, 2980bdebd6a2SJann Horn unsigned long size) 2981e69e9d4aSHATAYAMA Daisuke { 2982e69e9d4aSHATAYAMA Daisuke struct vm_struct *area; 2983bdebd6a2SJann Horn unsigned long off; 2984bdebd6a2SJann Horn unsigned long end_index; 2985bdebd6a2SJann Horn 2986bdebd6a2SJann Horn if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) 2987bdebd6a2SJann Horn return -EINVAL; 2988e69e9d4aSHATAYAMA Daisuke 2989e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size); 2990e69e9d4aSHATAYAMA Daisuke 2991e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 2992e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2993e69e9d4aSHATAYAMA Daisuke 2994e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr); 2995e69e9d4aSHATAYAMA Daisuke if (!area) 2996e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2997e69e9d4aSHATAYAMA Daisuke 2998fe9041c2SChristoph Hellwig if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) 2999e69e9d4aSHATAYAMA Daisuke return -EINVAL; 3000e69e9d4aSHATAYAMA Daisuke 3001bdebd6a2SJann Horn if (check_add_overflow(size, off, &end_index) || 3002bdebd6a2SJann Horn end_index > get_vm_area_size(area)) 3003e69e9d4aSHATAYAMA Daisuke return -EINVAL; 3004bdebd6a2SJann Horn kaddr += off; 3005e69e9d4aSHATAYAMA Daisuke 3006e69e9d4aSHATAYAMA Daisuke do { 3007e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr); 3008e69e9d4aSHATAYAMA Daisuke int ret; 3009e69e9d4aSHATAYAMA Daisuke 3010e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page); 3011e69e9d4aSHATAYAMA Daisuke if (ret) 3012e69e9d4aSHATAYAMA Daisuke return ret; 3013e69e9d4aSHATAYAMA Daisuke 3014e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE; 3015e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE; 3016e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE; 3017e69e9d4aSHATAYAMA Daisuke } while (size > 0); 3018e69e9d4aSHATAYAMA Daisuke 3019e69e9d4aSHATAYAMA Daisuke vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3020e69e9d4aSHATAYAMA Daisuke 3021e69e9d4aSHATAYAMA Daisuke return 0; 3022e69e9d4aSHATAYAMA Daisuke } 3023e69e9d4aSHATAYAMA Daisuke EXPORT_SYMBOL(remap_vmalloc_range_partial); 3024e69e9d4aSHATAYAMA Daisuke 3025e69e9d4aSHATAYAMA Daisuke /** 302683342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 302783342314SNick Piggin * @vma: vma to cover (map full range of vma) 302883342314SNick Piggin * @addr: vmalloc memory 302983342314SNick Piggin * @pgoff: number of pages into addr before first page to map 30307682486bSRandy Dunlap * 30317682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 303283342314SNick Piggin * 303383342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 303483342314SNick 
Piggin * that it is big enough to cover the vma. Will return failure if 303583342314SNick Piggin * that criteria isn't met. 303683342314SNick Piggin * 303772fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 303883342314SNick Piggin */ 303983342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 304083342314SNick Piggin unsigned long pgoff) 304183342314SNick Piggin { 3042e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start, 3043bdebd6a2SJann Horn addr, pgoff, 3044e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start); 304583342314SNick Piggin } 304683342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 304783342314SNick Piggin 30488b1e0f81SAnshuman Khandual static int f(pte_t *pte, unsigned long addr, void *data) 30495f4352fbSJeremy Fitzhardinge { 3050cd12909cSDavid Vrabel pte_t ***p = data; 3051cd12909cSDavid Vrabel 3052cd12909cSDavid Vrabel if (p) { 3053cd12909cSDavid Vrabel *(*p) = pte; 3054cd12909cSDavid Vrabel (*p)++; 3055cd12909cSDavid Vrabel } 30565f4352fbSJeremy Fitzhardinge return 0; 30575f4352fbSJeremy Fitzhardinge } 30585f4352fbSJeremy Fitzhardinge 30595f4352fbSJeremy Fitzhardinge /** 30605f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 30615f4352fbSJeremy Fitzhardinge * @size: size of the area 3062cd12909cSDavid Vrabel * @ptes: returns the PTEs for the address space 30637682486bSRandy Dunlap * 30647682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 30655f4352fbSJeremy Fitzhardinge * 30665f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 30675f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 3068cd12909cSDavid Vrabel * are created. 3069cd12909cSDavid Vrabel * 3070cd12909cSDavid Vrabel * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 3071cd12909cSDavid Vrabel * allocated for the VM area are returned. 30725f4352fbSJeremy Fitzhardinge */ 3073cd12909cSDavid Vrabel struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) 30745f4352fbSJeremy Fitzhardinge { 30755f4352fbSJeremy Fitzhardinge struct vm_struct *area; 30765f4352fbSJeremy Fitzhardinge 307723016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 307823016969SChristoph Lameter __builtin_return_address(0)); 30795f4352fbSJeremy Fitzhardinge if (area == NULL) 30805f4352fbSJeremy Fitzhardinge return NULL; 30815f4352fbSJeremy Fitzhardinge 30825f4352fbSJeremy Fitzhardinge /* 30835f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 30845f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm. 30855f4352fbSJeremy Fitzhardinge */ 30865f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 3087cd12909cSDavid Vrabel size, f, ptes ? 
&ptes : NULL)) { 30885f4352fbSJeremy Fitzhardinge free_vm_area(area); 30895f4352fbSJeremy Fitzhardinge return NULL; 30905f4352fbSJeremy Fitzhardinge } 30915f4352fbSJeremy Fitzhardinge 30925f4352fbSJeremy Fitzhardinge return area; 30935f4352fbSJeremy Fitzhardinge } 30945f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 30955f4352fbSJeremy Fitzhardinge 30965f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 30975f4352fbSJeremy Fitzhardinge { 30985f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 30995f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 31005f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 31015f4352fbSJeremy Fitzhardinge kfree(area); 31025f4352fbSJeremy Fitzhardinge } 31035f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 3104a10aa579SChristoph Lameter 31054f8b02b4STejun Heo #ifdef CONFIG_SMP 3106ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 3107ca23e405STejun Heo { 31084583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node); 3109ca23e405STejun Heo } 3110ca23e405STejun Heo 3111ca23e405STejun Heo /** 311268ad4a33SUladzislau Rezki (Sony) * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 311368ad4a33SUladzislau Rezki (Sony) * @addr: target address 3114ca23e405STejun Heo * 311568ad4a33SUladzislau Rezki (Sony) * Returns: vmap_area if it is found. If there is no such area 311668ad4a33SUladzislau Rezki (Sony) * the first highest(reverse order) vmap_area is returned 311768ad4a33SUladzislau Rezki (Sony) * i.e. va->va_start < addr && va->va_end < addr or NULL 311868ad4a33SUladzislau Rezki (Sony) * if there are no any areas before @addr. 3119ca23e405STejun Heo */ 312068ad4a33SUladzislau Rezki (Sony) static struct vmap_area * 312168ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr) 3122ca23e405STejun Heo { 312368ad4a33SUladzislau Rezki (Sony) struct vmap_area *va, *tmp; 312468ad4a33SUladzislau Rezki (Sony) struct rb_node *n; 312568ad4a33SUladzislau Rezki (Sony) 312668ad4a33SUladzislau Rezki (Sony) n = free_vmap_area_root.rb_node; 312768ad4a33SUladzislau Rezki (Sony) va = NULL; 3128ca23e405STejun Heo 3129ca23e405STejun Heo while (n) { 313068ad4a33SUladzislau Rezki (Sony) tmp = rb_entry(n, struct vmap_area, rb_node); 313168ad4a33SUladzislau Rezki (Sony) if (tmp->va_start <= addr) { 313268ad4a33SUladzislau Rezki (Sony) va = tmp; 313368ad4a33SUladzislau Rezki (Sony) if (tmp->va_end >= addr) 3134ca23e405STejun Heo break; 3135ca23e405STejun Heo 313668ad4a33SUladzislau Rezki (Sony) n = n->rb_right; 3137ca23e405STejun Heo } else { 313868ad4a33SUladzislau Rezki (Sony) n = n->rb_left; 3139ca23e405STejun Heo } 314068ad4a33SUladzislau Rezki (Sony) } 314168ad4a33SUladzislau Rezki (Sony) 314268ad4a33SUladzislau Rezki (Sony) return va; 3143ca23e405STejun Heo } 3144ca23e405STejun Heo 3145ca23e405STejun Heo /** 314668ad4a33SUladzislau Rezki (Sony) * pvm_determine_end_from_reverse - find the highest aligned address 314768ad4a33SUladzislau Rezki (Sony) * of free block below VMALLOC_END 314868ad4a33SUladzislau Rezki (Sony) * @va: 314968ad4a33SUladzislau Rezki (Sony) * in - the VA we start the search(reverse order); 315068ad4a33SUladzislau Rezki (Sony) * out - the VA with the highest aligned end address. 
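 *
 * Worked example of the rounding used here: with align == 0x10000 and a
 * free block whose va_end is 0x12345678, the candidate end address is
 * va_end & ~(align - 1) == 0x12340000, i.e. va_end rounded down to the
 * alignment; the caller then checks that this still lies above va_start.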
3151ca23e405STejun Heo * 315268ad4a33SUladzislau Rezki (Sony) * Returns: determined end address within vmap_area 3153ca23e405STejun Heo */ 315468ad4a33SUladzislau Rezki (Sony) static unsigned long 315568ad4a33SUladzislau Rezki (Sony) pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 3156ca23e405STejun Heo { 315768ad4a33SUladzislau Rezki (Sony) unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3158ca23e405STejun Heo unsigned long addr; 3159ca23e405STejun Heo 316068ad4a33SUladzislau Rezki (Sony) if (likely(*va)) { 316168ad4a33SUladzislau Rezki (Sony) list_for_each_entry_from_reverse((*va), 316268ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list, list) { 316368ad4a33SUladzislau Rezki (Sony) addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 316468ad4a33SUladzislau Rezki (Sony) if ((*va)->va_start < addr) 316568ad4a33SUladzislau Rezki (Sony) return addr; 316668ad4a33SUladzislau Rezki (Sony) } 3167ca23e405STejun Heo } 3168ca23e405STejun Heo 316968ad4a33SUladzislau Rezki (Sony) return 0; 3170ca23e405STejun Heo } 3171ca23e405STejun Heo 3172ca23e405STejun Heo /** 3173ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 3174ca23e405STejun Heo * @offsets: array containing offset of each area 3175ca23e405STejun Heo * @sizes: array containing size of each area 3176ca23e405STejun Heo * @nr_vms: the number of areas to allocate 3177ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this 3178ca23e405STejun Heo * 3179ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated 3180ca23e405STejun Heo * vm_structs on success, %NULL on failure 3181ca23e405STejun Heo * 3182ca23e405STejun Heo * Percpu allocator wants to use congruent vm areas so that it can 3183ca23e405STejun Heo * maintain the offsets among percpu areas. This function allocates 3184ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 3185ec3f64fcSDavid Rientjes * be scattered pretty far, distance between two areas easily going up 3186ec3f64fcSDavid Rientjes * to gigabytes. To avoid interacting with regular vmallocs, these 3187ec3f64fcSDavid Rientjes * areas are allocated from top. 3188ca23e405STejun Heo * 3189ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It 319068ad4a33SUladzislau Rezki (Sony) * does everything top-down and scans free blocks from the end looking 319168ad4a33SUladzislau Rezki (Sony) * for matching base. While scanning, if any of the areas do not fit the 319268ad4a33SUladzislau Rezki (Sony) * base address is pulled down to fit the area. Scanning is repeated till 319368ad4a33SUladzislau Rezki (Sony) * all the areas fit and then all necessary data structures are inserted 319468ad4a33SUladzislau Rezki (Sony) * and the result is returned. 
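 *
 * Example (illustrative sketch; two congruent 1M areas placed 4M apart,
 * all values hypothetical, SZ_1M from <linux/sizes.h>):
 *
 *	const unsigned long offsets[] = { 0, 4 * SZ_1M };
 *	const size_t sizes[] = { SZ_1M, SZ_1M };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, SZ_1M);
 *	if (!vms)
 *		return -ENOMEM;
 *	// vms[1]->addr - vms[0]->addr is now exactly 4M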
3195ca23e405STejun Heo */ 3196ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 3197ca23e405STejun Heo const size_t *sizes, int nr_vms, 3198ec3f64fcSDavid Rientjes size_t align) 3199ca23e405STejun Heo { 3200ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 3201ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 320268ad4a33SUladzislau Rezki (Sony) struct vmap_area **vas, *va; 3203ca23e405STejun Heo struct vm_struct **vms; 3204ca23e405STejun Heo int area, area2, last_area, term_area; 3205253a496dSDaniel Axtens unsigned long base, start, size, end, last_end, orig_start, orig_end; 3206ca23e405STejun Heo bool purged = false; 320768ad4a33SUladzislau Rezki (Sony) enum fit_type type; 3208ca23e405STejun Heo 3209ca23e405STejun Heo /* verify parameters and allocate data structures */ 3210891c49abSAlexander Kuleshov BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 3211ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 3212ca23e405STejun Heo start = offsets[area]; 3213ca23e405STejun Heo end = start + sizes[area]; 3214ca23e405STejun Heo 3215ca23e405STejun Heo /* is everything aligned properly? */ 3216ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 3217ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 3218ca23e405STejun Heo 3219ca23e405STejun Heo /* detect the area with the highest address */ 3220ca23e405STejun Heo if (start > offsets[last_area]) 3221ca23e405STejun Heo last_area = area; 3222ca23e405STejun Heo 3223c568da28SWei Yang for (area2 = area + 1; area2 < nr_vms; area2++) { 3224ca23e405STejun Heo unsigned long start2 = offsets[area2]; 3225ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 3226ca23e405STejun Heo 3227c568da28SWei Yang BUG_ON(start2 < end && start < end2); 3228ca23e405STejun Heo } 3229ca23e405STejun Heo } 3230ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 3231ca23e405STejun Heo 3232ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 3233ca23e405STejun Heo WARN_ON(true); 3234ca23e405STejun Heo return NULL; 3235ca23e405STejun Heo } 3236ca23e405STejun Heo 32374d67d860SThomas Meyer vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 32384d67d860SThomas Meyer vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 3239ca23e405STejun Heo if (!vas || !vms) 3240f1db7afdSKautuk Consul goto err_free2; 3241ca23e405STejun Heo 3242ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 324368ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 3244ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 3245ca23e405STejun Heo if (!vas[area] || !vms[area]) 3246ca23e405STejun Heo goto err_free; 3247ca23e405STejun Heo } 3248ca23e405STejun Heo retry: 3249e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 3250ca23e405STejun Heo 3251ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 3252ca23e405STejun Heo area = term_area = last_area; 3253ca23e405STejun Heo start = offsets[area]; 3254ca23e405STejun Heo end = start + sizes[area]; 3255ca23e405STejun Heo 325668ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(vmalloc_end); 325768ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 3258ca23e405STejun Heo 3259ca23e405STejun Heo while (true) { 3260ca23e405STejun Heo /* 3261ca23e405STejun Heo * base might have underflowed, add last_end before 
3262ca23e405STejun Heo * comparing. 3263ca23e405STejun Heo */ 326468ad4a33SUladzislau Rezki (Sony) if (base + last_end < vmalloc_start + last_end) 326568ad4a33SUladzislau Rezki (Sony) goto overflow; 3266ca23e405STejun Heo 3267ca23e405STejun Heo /* 326868ad4a33SUladzislau Rezki (Sony) * Fitting base has not been found. 3269ca23e405STejun Heo */ 327068ad4a33SUladzislau Rezki (Sony) if (va == NULL) 327168ad4a33SUladzislau Rezki (Sony) goto overflow; 3272ca23e405STejun Heo 3273ca23e405STejun Heo /* 3274d8cc323dSQiujun Huang * If required width exceeds current VA block, move 32755336e52cSKuppuswamy Sathyanarayanan * base downwards and then recheck. 32765336e52cSKuppuswamy Sathyanarayanan */ 32775336e52cSKuppuswamy Sathyanarayanan if (base + end > va->va_end) { 32785336e52cSKuppuswamy Sathyanarayanan base = pvm_determine_end_from_reverse(&va, align) - end; 32795336e52cSKuppuswamy Sathyanarayanan term_area = area; 32805336e52cSKuppuswamy Sathyanarayanan continue; 32815336e52cSKuppuswamy Sathyanarayanan } 32825336e52cSKuppuswamy Sathyanarayanan 32835336e52cSKuppuswamy Sathyanarayanan /* 328468ad4a33SUladzislau Rezki (Sony) * If this VA does not fit, move base downwards and recheck. 3285ca23e405STejun Heo */ 32865336e52cSKuppuswamy Sathyanarayanan if (base + start < va->va_start) { 328768ad4a33SUladzislau Rezki (Sony) va = node_to_va(rb_prev(&va->rb_node)); 328868ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 3289ca23e405STejun Heo term_area = area; 3290ca23e405STejun Heo continue; 3291ca23e405STejun Heo } 3292ca23e405STejun Heo 3293ca23e405STejun Heo /* 3294ca23e405STejun Heo * This area fits, move on to the previous one. If 3295ca23e405STejun Heo * the previous one is the terminal one, we're done. 3296ca23e405STejun Heo */ 3297ca23e405STejun Heo area = (area + nr_vms - 1) % nr_vms; 3298ca23e405STejun Heo if (area == term_area) 3299ca23e405STejun Heo break; 330068ad4a33SUladzislau Rezki (Sony) 3301ca23e405STejun Heo start = offsets[area]; 3302ca23e405STejun Heo end = start + sizes[area]; 330368ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(base + end); 3304ca23e405STejun Heo } 330568ad4a33SUladzislau Rezki (Sony) 3306ca23e405STejun Heo /* we've found a fitting base, insert all va's */ 3307ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 330868ad4a33SUladzislau Rezki (Sony) int ret; 3309ca23e405STejun Heo 331068ad4a33SUladzislau Rezki (Sony) start = base + offsets[area]; 331168ad4a33SUladzislau Rezki (Sony) size = sizes[area]; 331268ad4a33SUladzislau Rezki (Sony) 331368ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(start); 331468ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(va == NULL)) 331568ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 331668ad4a33SUladzislau Rezki (Sony) goto recovery; 331768ad4a33SUladzislau Rezki (Sony) 331868ad4a33SUladzislau Rezki (Sony) type = classify_va_fit_type(va, start, size); 331968ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(type == NOTHING_FIT)) 332068ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 332168ad4a33SUladzislau Rezki (Sony) goto recovery; 332268ad4a33SUladzislau Rezki (Sony) 332368ad4a33SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(va, start, size, type); 332468ad4a33SUladzislau Rezki (Sony) if (unlikely(ret)) 332568ad4a33SUladzislau Rezki (Sony) goto recovery; 332668ad4a33SUladzislau Rezki (Sony) 332768ad4a33SUladzislau Rezki (Sony) /* Allocated area. 
*/ 332868ad4a33SUladzislau Rezki (Sony) va = vas[area]; 332968ad4a33SUladzislau Rezki (Sony) va->va_start = start; 333068ad4a33SUladzislau Rezki (Sony) va->va_end = start + size; 3331ca23e405STejun Heo } 3332ca23e405STejun Heo 3333e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 3334ca23e405STejun Heo 3335253a496dSDaniel Axtens /* populate the kasan shadow space */ 3336253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) { 3337253a496dSDaniel Axtens if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) 3338253a496dSDaniel Axtens goto err_free_shadow; 3339253a496dSDaniel Axtens 3340253a496dSDaniel Axtens kasan_unpoison_vmalloc((void *)vas[area]->va_start, 3341253a496dSDaniel Axtens sizes[area]); 3342253a496dSDaniel Axtens } 3343253a496dSDaniel Axtens 3344ca23e405STejun Heo /* insert all vm's */ 3345e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 3346e36176beSUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 3347e36176beSUladzislau Rezki (Sony) insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); 3348e36176beSUladzislau Rezki (Sony) 3349e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, 3350ca23e405STejun Heo pcpu_get_vm_areas); 3351e36176beSUladzislau Rezki (Sony) } 3352e36176beSUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 3353ca23e405STejun Heo 3354ca23e405STejun Heo kfree(vas); 3355ca23e405STejun Heo return vms; 3356ca23e405STejun Heo 335768ad4a33SUladzislau Rezki (Sony) recovery: 3358e36176beSUladzislau Rezki (Sony) /* 3359e36176beSUladzislau Rezki (Sony) * Remove previously allocated areas. There is no 3360e36176beSUladzislau Rezki (Sony) * need to remove these areas from the busy tree, 3361e36176beSUladzislau Rezki (Sony) * because they are inserted only in the final step, 3362e36176beSUladzislau Rezki (Sony) * and only when pcpu_get_vm_areas() succeeds. 3363e36176beSUladzislau Rezki (Sony) */ 336468ad4a33SUladzislau Rezki (Sony) while (area--) { 3365253a496dSDaniel Axtens orig_start = vas[area]->va_start; 3366253a496dSDaniel Axtens orig_end = vas[area]->va_end; 3367253a496dSDaniel Axtens va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, 33683c5c3cfbSDaniel Axtens &free_vmap_area_list); 3369253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 3370253a496dSDaniel Axtens va->va_start, va->va_end); 337168ad4a33SUladzislau Rezki (Sony) vas[area] = NULL; 337268ad4a33SUladzislau Rezki (Sony) } 337368ad4a33SUladzislau Rezki (Sony) 337468ad4a33SUladzislau Rezki (Sony) overflow: 3375e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 337668ad4a33SUladzislau Rezki (Sony) if (!purged) { 337768ad4a33SUladzislau Rezki (Sony) purge_vmap_area_lazy(); 337868ad4a33SUladzislau Rezki (Sony) purged = true; 337968ad4a33SUladzislau Rezki (Sony) 338068ad4a33SUladzislau Rezki (Sony) /* Before "retry", check if we can recover.
*/ 338168ad4a33SUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 338268ad4a33SUladzislau Rezki (Sony) if (vas[area]) 338368ad4a33SUladzislau Rezki (Sony) continue; 338468ad4a33SUladzislau Rezki (Sony) 338568ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc( 338668ad4a33SUladzislau Rezki (Sony) vmap_area_cachep, GFP_KERNEL); 338768ad4a33SUladzislau Rezki (Sony) if (!vas[area]) 338868ad4a33SUladzislau Rezki (Sony) goto err_free; 338968ad4a33SUladzislau Rezki (Sony) } 339068ad4a33SUladzislau Rezki (Sony) 339168ad4a33SUladzislau Rezki (Sony) goto retry; 339268ad4a33SUladzislau Rezki (Sony) } 339368ad4a33SUladzislau Rezki (Sony) 3394ca23e405STejun Heo err_free: 3395ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 339668ad4a33SUladzislau Rezki (Sony) if (vas[area]) 339768ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, vas[area]); 339868ad4a33SUladzislau Rezki (Sony) 3399ca23e405STejun Heo kfree(vms[area]); 3400ca23e405STejun Heo } 3401f1db7afdSKautuk Consul err_free2: 3402ca23e405STejun Heo kfree(vas); 3403ca23e405STejun Heo kfree(vms); 3404ca23e405STejun Heo return NULL; 3405253a496dSDaniel Axtens 3406253a496dSDaniel Axtens err_free_shadow: 3407253a496dSDaniel Axtens spin_lock(&free_vmap_area_lock); 3408253a496dSDaniel Axtens /* 3409253a496dSDaniel Axtens * We release all the vmalloc shadows, even the ones for regions that 3410253a496dSDaniel Axtens * hadn't been successfully added. This relies on kasan_release_vmalloc 3411253a496dSDaniel Axtens * being able to tolerate this case. 3412253a496dSDaniel Axtens */ 3413253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) { 3414253a496dSDaniel Axtens orig_start = vas[area]->va_start; 3415253a496dSDaniel Axtens orig_end = vas[area]->va_end; 3416253a496dSDaniel Axtens va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, 3417253a496dSDaniel Axtens &free_vmap_area_list); 3418253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 3419253a496dSDaniel Axtens va->va_start, va->va_end); 3420253a496dSDaniel Axtens vas[area] = NULL; 3421253a496dSDaniel Axtens kfree(vms[area]); 3422253a496dSDaniel Axtens } 3423253a496dSDaniel Axtens spin_unlock(&free_vmap_area_lock); 3424253a496dSDaniel Axtens kfree(vas); 3425253a496dSDaniel Axtens kfree(vms); 3426253a496dSDaniel Axtens return NULL; 3427ca23e405STejun Heo } 3428ca23e405STejun Heo 3429ca23e405STejun Heo /** 3430ca23e405STejun Heo * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 3431ca23e405STejun Heo * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 3432ca23e405STejun Heo * @nr_vms: the number of allocated areas 3433ca23e405STejun Heo * 3434ca23e405STejun Heo * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
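 * Each vm_struct is released with free_vm_area() and the @vms array itself is freed with kfree().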
3435ca23e405STejun Heo */ 3436ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 3437ca23e405STejun Heo { 3438ca23e405STejun Heo int i; 3439ca23e405STejun Heo 3440ca23e405STejun Heo for (i = 0; i < nr_vms; i++) 3441ca23e405STejun Heo free_vm_area(vms[i]); 3442ca23e405STejun Heo kfree(vms); 3443ca23e405STejun Heo } 34444f8b02b4STejun Heo #endif /* CONFIG_SMP */ 3445a10aa579SChristoph Lameter 3446a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS 3447a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos) 3448e36176beSUladzislau Rezki (Sony) __acquires(&vmap_purge_lock) 3449d4033afdSJoonsoo Kim __acquires(&vmap_area_lock) 3450a10aa579SChristoph Lameter { 3451e36176beSUladzislau Rezki (Sony) mutex_lock(&vmap_purge_lock); 3452d4033afdSJoonsoo Kim spin_lock(&vmap_area_lock); 3453e36176beSUladzislau Rezki (Sony) 34543f500069Szijun_hu return seq_list_start(&vmap_area_list, *pos); 3455a10aa579SChristoph Lameter } 3456a10aa579SChristoph Lameter 3457a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos) 3458a10aa579SChristoph Lameter { 34593f500069Szijun_hu return seq_list_next(p, &vmap_area_list, pos); 3460a10aa579SChristoph Lameter } 3461a10aa579SChristoph Lameter 3462a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p) 3463e36176beSUladzislau Rezki (Sony) __releases(&vmap_purge_lock) 3464d4033afdSJoonsoo Kim __releases(&vmap_area_lock) 3465a10aa579SChristoph Lameter { 3466e36176beSUladzislau Rezki (Sony) mutex_unlock(&vmap_purge_lock); 3467d4033afdSJoonsoo Kim spin_unlock(&vmap_area_lock); 3468a10aa579SChristoph Lameter } 3469a10aa579SChristoph Lameter 3470a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v) 3471a47a126aSEric Dumazet { 3472e5adfffcSKirill A. 
Shutemov if (IS_ENABLED(CONFIG_NUMA)) { 3473a47a126aSEric Dumazet unsigned int nr, *counters = m->private; 3474a47a126aSEric Dumazet 3475a47a126aSEric Dumazet if (!counters) 3476a47a126aSEric Dumazet return; 3477a47a126aSEric Dumazet 3478af12346cSWanpeng Li if (v->flags & VM_UNINITIALIZED) 3479af12346cSWanpeng Li return; 34807e5b528bSDmitry Vyukov /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 34817e5b528bSDmitry Vyukov smp_rmb(); 3482af12346cSWanpeng Li 3483a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 3484a47a126aSEric Dumazet 3485a47a126aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr++) 3486a47a126aSEric Dumazet counters[page_to_nid(v->pages[nr])]++; 3487a47a126aSEric Dumazet 3488a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY) 3489a47a126aSEric Dumazet if (counters[nr]) 3490a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]); 3491a47a126aSEric Dumazet } 3492a47a126aSEric Dumazet } 3493a47a126aSEric Dumazet 3494dd3b8353SUladzislau Rezki (Sony) static void show_purge_info(struct seq_file *m) 3495dd3b8353SUladzislau Rezki (Sony) { 3496dd3b8353SUladzislau Rezki (Sony) struct llist_node *head; 3497dd3b8353SUladzislau Rezki (Sony) struct vmap_area *va; 3498dd3b8353SUladzislau Rezki (Sony) 3499dd3b8353SUladzislau Rezki (Sony) head = READ_ONCE(vmap_purge_list.first); 3500dd3b8353SUladzislau Rezki (Sony) if (head == NULL) 3501dd3b8353SUladzislau Rezki (Sony) return; 3502dd3b8353SUladzislau Rezki (Sony) 3503dd3b8353SUladzislau Rezki (Sony) llist_for_each_entry(va, head, purge_list) { 3504dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", 3505dd3b8353SUladzislau Rezki (Sony) (void *)va->va_start, (void *)va->va_end, 3506dd3b8353SUladzislau Rezki (Sony) va->va_end - va->va_start); 3507dd3b8353SUladzislau Rezki (Sony) } 3508dd3b8353SUladzislau Rezki (Sony) } 3509dd3b8353SUladzislau Rezki (Sony) 3510a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p) 3511a10aa579SChristoph Lameter { 35123f500069Szijun_hu struct vmap_area *va; 3513d4033afdSJoonsoo Kim struct vm_struct *v; 3514d4033afdSJoonsoo Kim 35153f500069Szijun_hu va = list_entry(p, struct vmap_area, list); 35163f500069Szijun_hu 3517c2ce8c14SWanpeng Li /* 3518688fcbfcSPengfei Li * s_show can race with remove_vm_area(): a NULL va->vm means the 3519688fcbfcSPengfei Li * vmap area is being torn down, or it belongs to a vm_map_ram allocation.
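 * In that case only the address range and size are printed, tagged "vm_map_ram" (see the seq_printf() below).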
3520c2ce8c14SWanpeng Li */ 3521688fcbfcSPengfei Li if (!va->vm) { 3522dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 352378c72746SYisheng Xie (void *)va->va_start, (void *)va->va_end, 3524dd3b8353SUladzislau Rezki (Sony) va->va_end - va->va_start); 352578c72746SYisheng Xie 3526d4033afdSJoonsoo Kim return 0; 352778c72746SYisheng Xie } 3528d4033afdSJoonsoo Kim 3529d4033afdSJoonsoo Kim v = va->vm; 3530a10aa579SChristoph Lameter 353145ec1690SKees Cook seq_printf(m, "0x%pK-0x%pK %7ld", 3532a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 3533a10aa579SChristoph Lameter 353462c70bceSJoe Perches if (v->caller) 353562c70bceSJoe Perches seq_printf(m, " %pS", v->caller); 353623016969SChristoph Lameter 3537a10aa579SChristoph Lameter if (v->nr_pages) 3538a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 3539a10aa579SChristoph Lameter 3540a10aa579SChristoph Lameter if (v->phys_addr) 3541199eaa05SMiles Chen seq_printf(m, " phys=%pa", &v->phys_addr); 3542a10aa579SChristoph Lameter 3543a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 3544f4527c90SFabian Frederick seq_puts(m, " ioremap"); 3545a10aa579SChristoph Lameter 3546a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 3547f4527c90SFabian Frederick seq_puts(m, " vmalloc"); 3548a10aa579SChristoph Lameter 3549a10aa579SChristoph Lameter if (v->flags & VM_MAP) 3550f4527c90SFabian Frederick seq_puts(m, " vmap"); 3551a10aa579SChristoph Lameter 3552a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 3553f4527c90SFabian Frederick seq_puts(m, " user"); 3554a10aa579SChristoph Lameter 3555fe9041c2SChristoph Hellwig if (v->flags & VM_DMA_COHERENT) 3556fe9041c2SChristoph Hellwig seq_puts(m, " dma-coherent"); 3557fe9041c2SChristoph Hellwig 3558244d63eeSDavid Rientjes if (is_vmalloc_addr(v->pages)) 3559f4527c90SFabian Frederick seq_puts(m, " vpages"); 3560a10aa579SChristoph Lameter 3561a47a126aSEric Dumazet show_numa_info(m, v); 3562a10aa579SChristoph Lameter seq_putc(m, '\n'); 3563dd3b8353SUladzislau Rezki (Sony) 3564dd3b8353SUladzislau Rezki (Sony) /* 3565dd3b8353SUladzislau Rezki (Sony) * As a final step, dump "unpurged" areas. Note, 3566dd3b8353SUladzislau Rezki (Sony) * that entire "/proc/vmallocinfo" output will not 3567dd3b8353SUladzislau Rezki (Sony) * be address sorted, because the purge list is not 3568dd3b8353SUladzislau Rezki (Sony) * sorted. 
3569dd3b8353SUladzislau Rezki (Sony) */ 3570dd3b8353SUladzislau Rezki (Sony) if (list_is_last(&va->list, &vmap_area_list)) 3571dd3b8353SUladzislau Rezki (Sony) show_purge_info(m); 3572dd3b8353SUladzislau Rezki (Sony) 3573a10aa579SChristoph Lameter return 0; 3574a10aa579SChristoph Lameter } 3575a10aa579SChristoph Lameter 35765f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = { 3577a10aa579SChristoph Lameter .start = s_start, 3578a10aa579SChristoph Lameter .next = s_next, 3579a10aa579SChristoph Lameter .stop = s_stop, 3580a10aa579SChristoph Lameter .show = s_show, 3581a10aa579SChristoph Lameter }; 35825f6a6a9cSAlexey Dobriyan 35835f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 35845f6a6a9cSAlexey Dobriyan { 3585fddda2b7SChristoph Hellwig if (IS_ENABLED(CONFIG_NUMA)) 35860825a6f9SJoe Perches proc_create_seq_private("vmallocinfo", 0400, NULL, 358744414d82SChristoph Hellwig &vmalloc_op, 358844414d82SChristoph Hellwig nr_node_ids * sizeof(unsigned int), NULL); 3589fddda2b7SChristoph Hellwig else 35900825a6f9SJoe Perches proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op); 35915f6a6a9cSAlexey Dobriyan return 0; 35925f6a6a9cSAlexey Dobriyan } 35935f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 3594db3808c1SJoonsoo Kim 3595a10aa579SChristoph Lameter #endif 3596
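/*
 * Example only: a line in /proc/vmallocinfo as produced by s_show() above
 * might look roughly like the one below; the addresses, size, caller name
 * and node counts are made up for illustration:
 *
 *   0x...-0x...   20480 some_caller+0x1c/0x80 pages=4 vmalloc N0=4
 *
 * i.e. the address range and size in bytes (for vmalloc areas this usually
 * includes one guard page, hence 20480 bytes with pages=4), the caller
 * recorded at allocation time, the page count, one or more flag tags
 * (ioremap, vmalloc, vmap, user, ...) and, with CONFIG_NUMA, the per-node
 * page counts appended by show_numa_info().
 */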