// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/

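/*
 * The vunmap_*_range() helpers below walk the kernel page tables level by
 * level (PGD -> P4D -> PUD -> PMD -> PTE) and clear the entries that back
 * a vmap/vmalloc region.
 */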
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_clear_huge(p4d))
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

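/*
 * Clear the kernel page-table entries covering [addr, end). Flushing the
 * TLB is left to the callers of this function.
 */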
static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

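/*
 * Example: looking up the backing page of a vmalloc()'ed buffer.
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	struct page *page = vmalloc_to_page(buf);
 *	unsigned long pfn = vmalloc_to_pfn(buf);
 *	...
 *	vfree(buf);
 */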

/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK		0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK	0

static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster, especially in the "no edge" splitting of
 * a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when the node is removed or rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

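/*
 * Look up the vmap_area that contains @addr in the "busy" tree.
 * The caller is expected to hold vmap_area_lock.
 */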
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns back addresses of parent node
 * and its left or right link for further processing.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction
	 * ("link") where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger the BUG() if the new area overlaps an existing
		 * one, either partially (on one side) or fully.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else
			BUG();
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * This is because we populate the tree from the bottom
		 * up to the parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

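/*
 * Debug helper (enabled via DEBUG_AUGMENT_PROPAGATE_CHECK): verify that
 * each node's subtree_max_size matches the size of a real free block
 * somewhere in its subtree.
 */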
#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(struct rb_node *n)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long size;
	bool found = false;

	if (n == NULL)
		return;

	va = rb_entry(n, struct vmap_area, rb_node);
	size = va->subtree_max_size;
	node = n;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) == size) {
			node = node->rb_left;
		} else {
			if (va_size(va) == size) {
				found = true;
				break;
			}

			node = node->rb_right;
		}
	}

	if (!found) {
		va = rb_entry(n, struct vmap_area, rb_node);
		pr_emerg("tree is corrupted: %lu, %lu\n",
			va_size(va), va->subtree_max_size);
	}

	augment_tree_propagate_check(n->rb_left);
	augment_tree_propagate_check(n->rb_right);
}
#endif

/*
 * This function populates subtree_max_size from the bottom to the upper
 * levels, starting from the VA point. The propagation must be done when
 * the VA size is modified by changing its va_start/va_end, or when a new
 * VA is inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree (free path);
 * - After VA has been shrunk (allocation path);
 * - After VA has been increased (merging path).
 *
 * Please note that it does not mean the upper parent nodes and their
 * subtree_max_size are recalculated all the way up to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example, if we modify node 4, shrinking it to 2, then no
 * modification is required. If we shrink node 2 to 1, only its
 * subtree_max_size is updated and set to 1. If we shrink node 8 to 6,
 * then its subtree_max_size is set to 6 and the parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	struct rb_node *node = &va->rb_node;
	unsigned long new_va_sub_max_size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);
		new_va_sub_max_size = compute_subtree_max_size(va);

		/*
		 * If the newly calculated maximum available size of the
		 * subtree is equal to the current one, then it means that
		 * the tree is propagated correctly. So we have to stop at
		 * this point to save cycles.
		 */
		if (va->subtree_max_size == new_va_sub_max_size)
			break;

		va->subtree_max_size = new_va_sub_max_size;
		node = rb_parent(&va->rb_node);
	}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check(free_vmap_area_root.rb_node);
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	link_va(va, root, parent, link, head);
	augment_tree_propagate_from(va);
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If coalescing is not possible, a new
 * free area is inserted. If the VA has been merged, it is
 * freed.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			sibling->va_end = va->va_end;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			if (merged)
				unlink_va(va, root);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}

	return va;
}

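/*
 * Check whether an allocation of @size bytes, aligned to @align and not
 * starting below @vstart, can be placed within this free vmap_area.
 */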
static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can overflow due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that can satisfy the request described by the passed
 * parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * It does not make sense to go deeper towards the
			 * right sub-tree if it does not have a free block
			 * that is at least as large as the requested search
			 * length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * only once due to the "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

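/*
 * Describes how a requested block [nva_start_addr, nva_start_addr + size)
 * fits into a free vmap_area, i.e. which side(s) of the free block, if
 * any, remain free after the allocation is carved out of it.
 */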
enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}

static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For the percpu allocator we do not do any
			 * pre-allocation and leave it as it is. The reason is
			 * that it most likely never ends up with NE_FIT_TYPE
			 * splitting. For percpu allocations, offsets and sizes
			 * are aligned to a fixed align request, i.e.
			 * RE_FIT_TYPE and FL_FIT_TYPE are its main fitting
			 * cases.
			 *
			 * There are a few exceptions though; one example is
			 * the first allocation (early boot up), when we have
			 * "one" big free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purpose. That is rare and most time does not
			 * occur.
			 *
			 * If the allocation fails, an "overflow" path is
			 * triggered to purge lazily freed areas to free some
			 * memory, then the "retry" path is triggered to repeat
			 * one more time. See more details in the
			 * alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise "vend" is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	struct vmap_area *va;
	enum fit_type type;
	int ret;

	va = find_vmap_lowest_match(size, align, vstart);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Classify what we have found. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	/* Update the free vmap_area. */
*/ 106268ad4a33SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(va, nva_start_addr, size, type); 106368ad4a33SUladzislau Rezki (Sony) if (ret) 106468ad4a33SUladzislau Rezki (Sony) return vend; 106568ad4a33SUladzislau Rezki (Sony) 1066a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1067a6cf4e0fSUladzislau Rezki (Sony) find_vmap_lowest_match_check(size); 1068a6cf4e0fSUladzislau Rezki (Sony) #endif 1069a6cf4e0fSUladzislau Rezki (Sony) 107068ad4a33SUladzislau Rezki (Sony) return nva_start_addr; 107168ad4a33SUladzislau Rezki (Sony) } 10724da56b99SChris Wilson 1073db64fe02SNick Piggin /* 1074d98c9e83SAndrey Ryabinin * Free a region of KVA allocated by alloc_vmap_area 1075d98c9e83SAndrey Ryabinin */ 1076d98c9e83SAndrey Ryabinin static void free_vmap_area(struct vmap_area *va) 1077d98c9e83SAndrey Ryabinin { 1078d98c9e83SAndrey Ryabinin /* 1079d98c9e83SAndrey Ryabinin * Remove from the busy tree/list. 1080d98c9e83SAndrey Ryabinin */ 1081d98c9e83SAndrey Ryabinin spin_lock(&vmap_area_lock); 1082d98c9e83SAndrey Ryabinin unlink_va(va, &vmap_area_root); 1083d98c9e83SAndrey Ryabinin spin_unlock(&vmap_area_lock); 1084d98c9e83SAndrey Ryabinin 1085d98c9e83SAndrey Ryabinin /* 1086d98c9e83SAndrey Ryabinin * Insert/Merge it back to the free tree/list. 1087d98c9e83SAndrey Ryabinin */ 1088d98c9e83SAndrey Ryabinin spin_lock(&free_vmap_area_lock); 1089d98c9e83SAndrey Ryabinin merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list); 1090d98c9e83SAndrey Ryabinin spin_unlock(&free_vmap_area_lock); 1091d98c9e83SAndrey Ryabinin } 1092d98c9e83SAndrey Ryabinin 1093d98c9e83SAndrey Ryabinin /* 1094db64fe02SNick Piggin * Allocate a region of KVA of the specified size and alignment, within the 1095db64fe02SNick Piggin * vstart and vend. 1096db64fe02SNick Piggin */ 1097db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size, 1098db64fe02SNick Piggin unsigned long align, 1099db64fe02SNick Piggin unsigned long vstart, unsigned long vend, 1100db64fe02SNick Piggin int node, gfp_t gfp_mask) 1101db64fe02SNick Piggin { 110282dd23e8SUladzislau Rezki (Sony) struct vmap_area *va, *pva; 11031da177e4SLinus Torvalds unsigned long addr; 1104db64fe02SNick Piggin int purged = 0; 1105d98c9e83SAndrey Ryabinin int ret; 1106db64fe02SNick Piggin 11077766970cSNick Piggin BUG_ON(!size); 1108891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 110989699605SNick Piggin BUG_ON(!is_power_of_2(align)); 1110db64fe02SNick Piggin 111168ad4a33SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized)) 111268ad4a33SUladzislau Rezki (Sony) return ERR_PTR(-EBUSY); 111368ad4a33SUladzislau Rezki (Sony) 11145803ed29SChristoph Hellwig might_sleep(); 1115f07116d7SUladzislau Rezki (Sony) gfp_mask = gfp_mask & GFP_RECLAIM_MASK; 11164da56b99SChris Wilson 1117f07116d7SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1118db64fe02SNick Piggin if (unlikely(!va)) 1119db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 1120db64fe02SNick Piggin 11217f88f88fSCatalin Marinas /* 11227f88f88fSCatalin Marinas * Only scan the relevant parts containing pointers to other objects 11237f88f88fSCatalin Marinas * to avoid false negatives. 11247f88f88fSCatalin Marinas */ 1125f07116d7SUladzislau Rezki (Sony) kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); 11267f88f88fSCatalin Marinas 1127db64fe02SNick Piggin retry: 112882dd23e8SUladzislau Rezki (Sony) /* 112981f1ba58SUladzislau Rezki (Sony) * Preload this CPU with one extra vmap_area object. 
It is used 113081f1ba58SUladzislau Rezki (Sony) * when fit type of free area is NE_FIT_TYPE. Please note, it 113181f1ba58SUladzislau Rezki (Sony) * does not guarantee that an allocation occurs on a CPU that 113281f1ba58SUladzislau Rezki (Sony) * is preloaded, instead we minimize the case when it is not. 113381f1ba58SUladzislau Rezki (Sony) * It can happen because of cpu migration, because there is a 113481f1ba58SUladzislau Rezki (Sony) * race until the below spinlock is taken. 113582dd23e8SUladzislau Rezki (Sony) * 113682dd23e8SUladzislau Rezki (Sony) * The preload is done in non-atomic context, thus it allows us 113782dd23e8SUladzislau Rezki (Sony) * to use more permissive allocation masks to be more stable under 113881f1ba58SUladzislau Rezki (Sony) * low memory condition and high memory pressure. In rare case, 113981f1ba58SUladzislau Rezki (Sony) * if not preloaded, GFP_NOWAIT is used. 114082dd23e8SUladzislau Rezki (Sony) * 114181f1ba58SUladzislau Rezki (Sony) * Set "pva" to NULL here, because of "retry" path. 114282dd23e8SUladzislau Rezki (Sony) */ 114381f1ba58SUladzislau Rezki (Sony) pva = NULL; 114482dd23e8SUladzislau Rezki (Sony) 114581f1ba58SUladzislau Rezki (Sony) if (!this_cpu_read(ne_fit_preload_node)) 114681f1ba58SUladzislau Rezki (Sony) /* 114781f1ba58SUladzislau Rezki (Sony) * Even if it fails we do not really care about that. 114881f1ba58SUladzislau Rezki (Sony) * Just proceed as it is. If needed "overflow" path 114981f1ba58SUladzislau Rezki (Sony) * will refill the cache we allocate from. 115081f1ba58SUladzislau Rezki (Sony) */ 1151f07116d7SUladzislau Rezki (Sony) pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 115282dd23e8SUladzislau Rezki (Sony) 1153e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 115481f1ba58SUladzislau Rezki (Sony) 115581f1ba58SUladzislau Rezki (Sony) if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) 115681f1ba58SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, pva); 115768ad4a33SUladzislau Rezki (Sony) 115889699605SNick Piggin /* 115968ad4a33SUladzislau Rezki (Sony) * If an allocation fails, the "vend" address is 116068ad4a33SUladzislau Rezki (Sony) * returned. Therefore trigger the overflow path. 
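 * (That is the unlikely(addr == vend) check right below.)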
116189699605SNick Piggin */ 1162cacca6baSUladzislau Rezki (Sony) addr = __alloc_vmap_area(size, align, vstart, vend); 1163e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 1164e36176beSUladzislau Rezki (Sony) 116568ad4a33SUladzislau Rezki (Sony) if (unlikely(addr == vend)) 116689699605SNick Piggin goto overflow; 116789699605SNick Piggin 116889699605SNick Piggin va->va_start = addr; 116989699605SNick Piggin va->va_end = addr + size; 1170688fcbfcSPengfei Li va->vm = NULL; 117168ad4a33SUladzislau Rezki (Sony) 1172d98c9e83SAndrey Ryabinin 1173e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 1174e36176beSUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 117589699605SNick Piggin spin_unlock(&vmap_area_lock); 117689699605SNick Piggin 117761e16557SWang Xiaoqiang BUG_ON(!IS_ALIGNED(va->va_start, align)); 117889699605SNick Piggin BUG_ON(va->va_start < vstart); 117989699605SNick Piggin BUG_ON(va->va_end > vend); 118089699605SNick Piggin 1181d98c9e83SAndrey Ryabinin ret = kasan_populate_vmalloc(addr, size); 1182d98c9e83SAndrey Ryabinin if (ret) { 1183d98c9e83SAndrey Ryabinin free_vmap_area(va); 1184d98c9e83SAndrey Ryabinin return ERR_PTR(ret); 1185d98c9e83SAndrey Ryabinin } 1186d98c9e83SAndrey Ryabinin 118789699605SNick Piggin return va; 118889699605SNick Piggin 11897766970cSNick Piggin overflow: 1190db64fe02SNick Piggin if (!purged) { 1191db64fe02SNick Piggin purge_vmap_area_lazy(); 1192db64fe02SNick Piggin purged = 1; 1193db64fe02SNick Piggin goto retry; 1194db64fe02SNick Piggin } 11954da56b99SChris Wilson 11964da56b99SChris Wilson if (gfpflags_allow_blocking(gfp_mask)) { 11974da56b99SChris Wilson unsigned long freed = 0; 11984da56b99SChris Wilson blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 11994da56b99SChris Wilson if (freed > 0) { 12004da56b99SChris Wilson purged = 0; 12014da56b99SChris Wilson goto retry; 12024da56b99SChris Wilson } 12034da56b99SChris Wilson } 12044da56b99SChris Wilson 120503497d76SFlorian Fainelli if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 1206756a025fSJoe Perches pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 1207756a025fSJoe Perches size); 120868ad4a33SUladzislau Rezki (Sony) 120968ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 1210db64fe02SNick Piggin return ERR_PTR(-EBUSY); 1211db64fe02SNick Piggin } 1212db64fe02SNick Piggin 12134da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb) 12144da56b99SChris Wilson { 12154da56b99SChris Wilson return blocking_notifier_chain_register(&vmap_notify_list, nb); 12164da56b99SChris Wilson } 12174da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 12184da56b99SChris Wilson 12194da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb) 12204da56b99SChris Wilson { 12214da56b99SChris Wilson return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 12224da56b99SChris Wilson } 12234da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 12244da56b99SChris Wilson 1225db64fe02SNick Piggin /* 1226db64fe02SNick Piggin * Clear the pagetable entries of a given vmap_area 1227db64fe02SNick Piggin */ 1228db64fe02SNick Piggin static void unmap_vmap_area(struct vmap_area *va) 1229db64fe02SNick Piggin { 1230db64fe02SNick Piggin vunmap_page_range(va->va_start, va->va_end); 1231db64fe02SNick Piggin } 1232db64fe02SNick Piggin 1233db64fe02SNick Piggin /* 1234db64fe02SNick Piggin * lazy_max_pages is the maximum amount of 
virtual address space we gather up 1235db64fe02SNick Piggin * before attempting to purge with a TLB flush. 1236db64fe02SNick Piggin * 1237db64fe02SNick Piggin * There is a tradeoff here: a larger number will cover more kernel page tables 1238db64fe02SNick Piggin * and take slightly longer to purge, but it will linearly reduce the number of 1239db64fe02SNick Piggin * global TLB flushes that must be performed. It would seem natural to scale 1240db64fe02SNick Piggin * this number up linearly with the number of CPUs (because vmapping activity 1241db64fe02SNick Piggin * could also scale linearly with the number of CPUs), however it is likely 1242db64fe02SNick Piggin * that in practice, workloads might be constrained in other ways that mean 1243db64fe02SNick Piggin * vmap activity will not scale linearly with CPUs. Also, I want to be 1244db64fe02SNick Piggin * conservative and not introduce a big latency on huge systems, so go with 1245db64fe02SNick Piggin * a less aggressive log scale. It will still be an improvement over the old 1246db64fe02SNick Piggin * code, and it will be simple to change the scale factor if we find that it 1247db64fe02SNick Piggin * becomes a problem on bigger systems. 1248db64fe02SNick Piggin */ 1249db64fe02SNick Piggin static unsigned long lazy_max_pages(void) 1250db64fe02SNick Piggin { 1251db64fe02SNick Piggin unsigned int log; 1252db64fe02SNick Piggin 1253db64fe02SNick Piggin log = fls(num_online_cpus()); 1254db64fe02SNick Piggin 1255db64fe02SNick Piggin return log * (32UL * 1024 * 1024 / PAGE_SIZE); 1256db64fe02SNick Piggin } 1257db64fe02SNick Piggin 12584d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); 1259db64fe02SNick Piggin 12600574ecd1SChristoph Hellwig /* 12610574ecd1SChristoph Hellwig * Serialize vmap purging. There is no actual critical section protected 12620574ecd1SChristoph Hellwig * by this lock, but we want to avoid concurrent calls for performance 12630574ecd1SChristoph Hellwig * reasons and to make the pcpu_get_vm_areas more deterministic. 12640574ecd1SChristoph Hellwig */ 1265f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock); 12660574ecd1SChristoph Hellwig 126702b709dfSNick Piggin /* for per-CPU blocks */ 126802b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void); 126902b709dfSNick Piggin 1270db64fe02SNick Piggin /* 12713ee48b6aSCliff Wickman * called before a call to iounmap() if the caller wants vm_area_struct's 12723ee48b6aSCliff Wickman * immediately freed. 12733ee48b6aSCliff Wickman */ 12743ee48b6aSCliff Wickman void set_iounmap_nonlazy(void) 12753ee48b6aSCliff Wickman { 12764d36e6f8SUladzislau Rezki (Sony) atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1); 12773ee48b6aSCliff Wickman } 12783ee48b6aSCliff Wickman 12793ee48b6aSCliff Wickman /* 1280db64fe02SNick Piggin * Purges all lazily-freed vmap areas.
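 * (A rough worked example, assuming 4 KiB pages: with 8 online CPUs,
 * fls(8) = 4, so lazy_max_pages() is 4 * (32 MB / 4 KiB) = 32768 pages,
 * i.e. roughly 128 MB of lazily freed virtual space may accumulate
 * before a purge is kicked off.)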
1281db64fe02SNick Piggin */ 12820574ecd1SChristoph Hellwig static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) 1283db64fe02SNick Piggin { 12844d36e6f8SUladzislau Rezki (Sony) unsigned long resched_threshold; 128580c4bd7aSChris Wilson struct llist_node *valist; 1286db64fe02SNick Piggin struct vmap_area *va; 1287cbb76676SVegard Nossum struct vmap_area *n_va; 1288db64fe02SNick Piggin 12890574ecd1SChristoph Hellwig lockdep_assert_held(&vmap_purge_lock); 129002b709dfSNick Piggin 129180c4bd7aSChris Wilson valist = llist_del_all(&vmap_purge_list); 129268571be9SUladzislau Rezki (Sony) if (unlikely(valist == NULL)) 129368571be9SUladzislau Rezki (Sony) return false; 129468571be9SUladzislau Rezki (Sony) 129568571be9SUladzislau Rezki (Sony) /* 12963f8fd02bSJoerg Roedel * First make sure the mappings are removed from all page-tables 12973f8fd02bSJoerg Roedel * before they are freed. 12983f8fd02bSJoerg Roedel */ 1299763802b5SJoerg Roedel vmalloc_sync_unmappings(); 13003f8fd02bSJoerg Roedel 13013f8fd02bSJoerg Roedel /* 130268571be9SUladzislau Rezki (Sony) * TODO: to calculate a flush range without looping. 130368571be9SUladzislau Rezki (Sony) * The list can be up to lazy_max_pages() elements. 130468571be9SUladzislau Rezki (Sony) */ 130580c4bd7aSChris Wilson llist_for_each_entry(va, valist, purge_list) { 13060574ecd1SChristoph Hellwig if (va->va_start < start) 13070574ecd1SChristoph Hellwig start = va->va_start; 13080574ecd1SChristoph Hellwig if (va->va_end > end) 13090574ecd1SChristoph Hellwig end = va->va_end; 1310db64fe02SNick Piggin } 1311db64fe02SNick Piggin 13120574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 13134d36e6f8SUladzislau Rezki (Sony) resched_threshold = lazy_max_pages() << 1; 1314db64fe02SNick Piggin 1315e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 1316763b218dSJoel Fernandes llist_for_each_entry_safe(va, n_va, valist, purge_list) { 13174d36e6f8SUladzislau Rezki (Sony) unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; 13183c5c3cfbSDaniel Axtens unsigned long orig_start = va->va_start; 13193c5c3cfbSDaniel Axtens unsigned long orig_end = va->va_end; 1320763b218dSJoel Fernandes 1321dd3b8353SUladzislau Rezki (Sony) /* 1322dd3b8353SUladzislau Rezki (Sony) * Finally insert or merge lazily-freed area. It is 1323dd3b8353SUladzislau Rezki (Sony) * detached and there is no need to "unlink" it from 1324dd3b8353SUladzislau Rezki (Sony) * anything. 1325dd3b8353SUladzislau Rezki (Sony) */ 13263c5c3cfbSDaniel Axtens va = merge_or_add_vmap_area(va, &free_vmap_area_root, 13273c5c3cfbSDaniel Axtens &free_vmap_area_list); 13283c5c3cfbSDaniel Axtens 13293c5c3cfbSDaniel Axtens if (is_vmalloc_or_module_addr((void *)orig_start)) 13303c5c3cfbSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 13313c5c3cfbSDaniel Axtens va->va_start, va->va_end); 1332dd3b8353SUladzislau Rezki (Sony) 13334d36e6f8SUladzislau Rezki (Sony) atomic_long_sub(nr, &vmap_lazy_nr); 133468571be9SUladzislau Rezki (Sony) 13354d36e6f8SUladzislau Rezki (Sony) if (atomic_long_read(&vmap_lazy_nr) < resched_threshold) 1336e36176beSUladzislau Rezki (Sony) cond_resched_lock(&free_vmap_area_lock); 1337763b218dSJoel Fernandes } 1338e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 13390574ecd1SChristoph Hellwig return true; 1340db64fe02SNick Piggin } 1341db64fe02SNick Piggin 1342db64fe02SNick Piggin /* 1343496850e5SNick Piggin * Kick off a purge of the outstanding lazy areas. Don't bother if somebody 1344496850e5SNick Piggin * is already purging. 
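 * (Hence the mutex_trylock() below: if vmap_purge_lock is already held,
 * another purger is doing the work and this call simply returns.)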
1345496850e5SNick Piggin */ 1346496850e5SNick Piggin static void try_purge_vmap_area_lazy(void) 1347496850e5SNick Piggin { 1348f9e09977SChristoph Hellwig if (mutex_trylock(&vmap_purge_lock)) { 13490574ecd1SChristoph Hellwig __purge_vmap_area_lazy(ULONG_MAX, 0); 1350f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 13510574ecd1SChristoph Hellwig } 1352496850e5SNick Piggin } 1353496850e5SNick Piggin 1354496850e5SNick Piggin /* 1355db64fe02SNick Piggin * Kick off a purge of the outstanding lazy areas. 1356db64fe02SNick Piggin */ 1357db64fe02SNick Piggin static void purge_vmap_area_lazy(void) 1358db64fe02SNick Piggin { 1359f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 13600574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 13610574ecd1SChristoph Hellwig __purge_vmap_area_lazy(ULONG_MAX, 0); 1362f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 1363db64fe02SNick Piggin } 1364db64fe02SNick Piggin 1365db64fe02SNick Piggin /* 136664141da5SJeremy Fitzhardinge * Free a vmap area, caller ensuring that the area has been unmapped 136764141da5SJeremy Fitzhardinge * and flush_cache_vunmap had been called for the correct range 136864141da5SJeremy Fitzhardinge * previously. 1369db64fe02SNick Piggin */ 137064141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va) 1371db64fe02SNick Piggin { 13724d36e6f8SUladzislau Rezki (Sony) unsigned long nr_lazy; 137380c4bd7aSChris Wilson 1374dd3b8353SUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 1375dd3b8353SUladzislau Rezki (Sony) unlink_va(va, &vmap_area_root); 1376dd3b8353SUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 1377dd3b8353SUladzislau Rezki (Sony) 13784d36e6f8SUladzislau Rezki (Sony) nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> 13794d36e6f8SUladzislau Rezki (Sony) PAGE_SHIFT, &vmap_lazy_nr); 138080c4bd7aSChris Wilson 138180c4bd7aSChris Wilson /* After this point, we may free va at any time */ 138280c4bd7aSChris Wilson llist_add(&va->purge_list, &vmap_purge_list); 138380c4bd7aSChris Wilson 138480c4bd7aSChris Wilson if (unlikely(nr_lazy > lazy_max_pages())) 1385496850e5SNick Piggin try_purge_vmap_area_lazy(); 1386db64fe02SNick Piggin } 1387db64fe02SNick Piggin 1388b29acbdcSNick Piggin /* 1389b29acbdcSNick Piggin * Free and unmap a vmap area 1390b29acbdcSNick Piggin */ 1391b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va) 1392b29acbdcSNick Piggin { 1393b29acbdcSNick Piggin flush_cache_vunmap(va->va_start, va->va_end); 1394c8eef01eSChristoph Hellwig unmap_vmap_area(va); 13958e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 139682a2e924SChintan Pandya flush_tlb_kernel_range(va->va_start, va->va_end); 139782a2e924SChintan Pandya 1398c8eef01eSChristoph Hellwig free_vmap_area_noflush(va); 1399b29acbdcSNick Piggin } 1400b29acbdcSNick Piggin 1401db64fe02SNick Piggin static struct vmap_area *find_vmap_area(unsigned long addr) 1402db64fe02SNick Piggin { 1403db64fe02SNick Piggin struct vmap_area *va; 1404db64fe02SNick Piggin 1405db64fe02SNick Piggin spin_lock(&vmap_area_lock); 1406db64fe02SNick Piggin va = __find_vmap_area(addr); 1407db64fe02SNick Piggin spin_unlock(&vmap_area_lock); 1408db64fe02SNick Piggin 1409db64fe02SNick Piggin return va; 1410db64fe02SNick Piggin } 1411db64fe02SNick Piggin 1412db64fe02SNick Piggin /*** Per cpu kva allocator ***/ 1413db64fe02SNick Piggin 1414db64fe02SNick Piggin /* 1415db64fe02SNick Piggin * vmap space is limited especially on 32 bit architectures. 
Ensure there is 1416db64fe02SNick Piggin * room for at least 16 percpu vmap blocks per CPU. 1417db64fe02SNick Piggin */ 1418db64fe02SNick Piggin /* 1419db64fe02SNick Piggin * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 1420db64fe02SNick Piggin * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 1421db64fe02SNick Piggin * instead (we just need a rough idea) 1422db64fe02SNick Piggin */ 1423db64fe02SNick Piggin #if BITS_PER_LONG == 32 1424db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024) 1425db64fe02SNick Piggin #else 1426db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024*1024) 1427db64fe02SNick Piggin #endif 1428db64fe02SNick Piggin 1429db64fe02SNick Piggin #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 1430db64fe02SNick Piggin #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 1431db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 1432db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 1433db64fe02SNick Piggin #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 1434db64fe02SNick Piggin #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 1435f982f915SClemens Ladisch #define VMAP_BBMAP_BITS \ 1436f982f915SClemens Ladisch VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 1437db64fe02SNick Piggin VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 1438f982f915SClemens Ladisch VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 1439db64fe02SNick Piggin 1440db64fe02SNick Piggin #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 1441db64fe02SNick Piggin 1442db64fe02SNick Piggin struct vmap_block_queue { 1443db64fe02SNick Piggin spinlock_t lock; 1444db64fe02SNick Piggin struct list_head free; 1445db64fe02SNick Piggin }; 1446db64fe02SNick Piggin 1447db64fe02SNick Piggin struct vmap_block { 1448db64fe02SNick Piggin spinlock_t lock; 1449db64fe02SNick Piggin struct vmap_area *va; 1450db64fe02SNick Piggin unsigned long free, dirty; 14517d61bfe8SRoman Pen unsigned long dirty_min, dirty_max; /*< dirty range */ 1452db64fe02SNick Piggin struct list_head free_list; 1453db64fe02SNick Piggin struct rcu_head rcu_head; 145402b709dfSNick Piggin struct list_head purge; 1455db64fe02SNick Piggin }; 1456db64fe02SNick Piggin 1457db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 1458db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 1459db64fe02SNick Piggin 1460db64fe02SNick Piggin /* 1461db64fe02SNick Piggin * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block 1462db64fe02SNick Piggin * in the free path. Could get rid of this if we change the API to return a 1463db64fe02SNick Piggin * "cookie" from alloc, to be passed to free. But no big deal yet. 1464db64fe02SNick Piggin */ 1465db64fe02SNick Piggin static DEFINE_SPINLOCK(vmap_block_tree_lock); 1466db64fe02SNick Piggin static RADIX_TREE(vmap_block_tree, GFP_ATOMIC); 1467db64fe02SNick Piggin 1468db64fe02SNick Piggin /* 1469db64fe02SNick Piggin * We should probably have a fallback mechanism to allocate virtual memory 1470db64fe02SNick Piggin * out of partially filled vmap blocks. However vmap block sizing should be 1471db64fe02SNick Piggin * fairly reasonable according to the vmalloc size, so it shouldn't be a 1472db64fe02SNick Piggin * big problem. 
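 * (An assumed worked example: on 64-bit with 4 KiB pages and NR_CPUS = 64,
 * VMALLOC_PAGES is 128 GB / 4 KiB = 32M pages, and 32M / 64 / 16 = 32768,
 * which the clamp above caps at VMAP_BBMAP_BITS_MAX = 1024; each vmap
 * block then spans 1024 * 4 KiB = 4 MB of address space.)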
1473db64fe02SNick Piggin */ 1474db64fe02SNick Piggin 1475db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr) 1476db64fe02SNick Piggin { 1477db64fe02SNick Piggin addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 1478db64fe02SNick Piggin addr /= VMAP_BLOCK_SIZE; 1479db64fe02SNick Piggin return addr; 1480db64fe02SNick Piggin } 1481db64fe02SNick Piggin 1482cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 1483cf725ce2SRoman Pen { 1484cf725ce2SRoman Pen unsigned long addr; 1485cf725ce2SRoman Pen 1486cf725ce2SRoman Pen addr = va_start + (pages_off << PAGE_SHIFT); 1487cf725ce2SRoman Pen BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 1488cf725ce2SRoman Pen return (void *)addr; 1489cf725ce2SRoman Pen } 1490cf725ce2SRoman Pen 1491cf725ce2SRoman Pen /** 1492cf725ce2SRoman Pen * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 1493cf725ce2SRoman Pen * block. Of course pages number can't exceed VMAP_BBMAP_BITS 1494cf725ce2SRoman Pen * @order: how many 2^order pages should be occupied in newly allocated block 1495cf725ce2SRoman Pen * @gfp_mask: flags for the page level allocator 1496cf725ce2SRoman Pen * 1497a862f68aSMike Rapoport * Return: virtual address in a newly allocated block or ERR_PTR(-errno) 1498cf725ce2SRoman Pen */ 1499cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 1500db64fe02SNick Piggin { 1501db64fe02SNick Piggin struct vmap_block_queue *vbq; 1502db64fe02SNick Piggin struct vmap_block *vb; 1503db64fe02SNick Piggin struct vmap_area *va; 1504db64fe02SNick Piggin unsigned long vb_idx; 1505db64fe02SNick Piggin int node, err; 1506cf725ce2SRoman Pen void *vaddr; 1507db64fe02SNick Piggin 1508db64fe02SNick Piggin node = numa_node_id(); 1509db64fe02SNick Piggin 1510db64fe02SNick Piggin vb = kmalloc_node(sizeof(struct vmap_block), 1511db64fe02SNick Piggin gfp_mask & GFP_RECLAIM_MASK, node); 1512db64fe02SNick Piggin if (unlikely(!vb)) 1513db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 1514db64fe02SNick Piggin 1515db64fe02SNick Piggin va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 1516db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, 1517db64fe02SNick Piggin node, gfp_mask); 1518ddf9c6d4STobias Klauser if (IS_ERR(va)) { 1519db64fe02SNick Piggin kfree(vb); 1520e7d86340SJulia Lawall return ERR_CAST(va); 1521db64fe02SNick Piggin } 1522db64fe02SNick Piggin 1523db64fe02SNick Piggin err = radix_tree_preload(gfp_mask); 1524db64fe02SNick Piggin if (unlikely(err)) { 1525db64fe02SNick Piggin kfree(vb); 1526db64fe02SNick Piggin free_vmap_area(va); 1527db64fe02SNick Piggin return ERR_PTR(err); 1528db64fe02SNick Piggin } 1529db64fe02SNick Piggin 1530cf725ce2SRoman Pen vaddr = vmap_block_vaddr(va->va_start, 0); 1531db64fe02SNick Piggin spin_lock_init(&vb->lock); 1532db64fe02SNick Piggin vb->va = va; 1533cf725ce2SRoman Pen /* At least something should be left free */ 1534cf725ce2SRoman Pen BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 1535cf725ce2SRoman Pen vb->free = VMAP_BBMAP_BITS - (1UL << order); 1536db64fe02SNick Piggin vb->dirty = 0; 15377d61bfe8SRoman Pen vb->dirty_min = VMAP_BBMAP_BITS; 15387d61bfe8SRoman Pen vb->dirty_max = 0; 1539db64fe02SNick Piggin INIT_LIST_HEAD(&vb->free_list); 1540db64fe02SNick Piggin 1541db64fe02SNick Piggin vb_idx = addr_to_vb_idx(va->va_start); 1542db64fe02SNick Piggin spin_lock(&vmap_block_tree_lock); 1543db64fe02SNick Piggin err = radix_tree_insert(&vmap_block_tree, vb_idx, vb); 1544db64fe02SNick Piggin 
spin_unlock(&vmap_block_tree_lock); 1545db64fe02SNick Piggin BUG_ON(err); 1546db64fe02SNick Piggin radix_tree_preload_end(); 1547db64fe02SNick Piggin 1548db64fe02SNick Piggin vbq = &get_cpu_var(vmap_block_queue); 1549db64fe02SNick Piggin spin_lock(&vbq->lock); 155068ac546fSRoman Pen list_add_tail_rcu(&vb->free_list, &vbq->free); 1551db64fe02SNick Piggin spin_unlock(&vbq->lock); 15523f04ba85STejun Heo put_cpu_var(vmap_block_queue); 1553db64fe02SNick Piggin 1554cf725ce2SRoman Pen return vaddr; 1555db64fe02SNick Piggin } 1556db64fe02SNick Piggin 1557db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb) 1558db64fe02SNick Piggin { 1559db64fe02SNick Piggin struct vmap_block *tmp; 1560db64fe02SNick Piggin unsigned long vb_idx; 1561db64fe02SNick Piggin 1562db64fe02SNick Piggin vb_idx = addr_to_vb_idx(vb->va->va_start); 1563db64fe02SNick Piggin spin_lock(&vmap_block_tree_lock); 1564db64fe02SNick Piggin tmp = radix_tree_delete(&vmap_block_tree, vb_idx); 1565db64fe02SNick Piggin spin_unlock(&vmap_block_tree_lock); 1566db64fe02SNick Piggin BUG_ON(tmp != vb); 1567db64fe02SNick Piggin 156864141da5SJeremy Fitzhardinge free_vmap_area_noflush(vb->va); 156922a3c7d1SLai Jiangshan kfree_rcu(vb, rcu_head); 1570db64fe02SNick Piggin } 1571db64fe02SNick Piggin 157202b709dfSNick Piggin static void purge_fragmented_blocks(int cpu) 157302b709dfSNick Piggin { 157402b709dfSNick Piggin LIST_HEAD(purge); 157502b709dfSNick Piggin struct vmap_block *vb; 157602b709dfSNick Piggin struct vmap_block *n_vb; 157702b709dfSNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 157802b709dfSNick Piggin 157902b709dfSNick Piggin rcu_read_lock(); 158002b709dfSNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 158102b709dfSNick Piggin 158202b709dfSNick Piggin if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) 158302b709dfSNick Piggin continue; 158402b709dfSNick Piggin 158502b709dfSNick Piggin spin_lock(&vb->lock); 158602b709dfSNick Piggin if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { 158702b709dfSNick Piggin vb->free = 0; /* prevent further allocs after releasing lock */ 158802b709dfSNick Piggin vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ 15897d61bfe8SRoman Pen vb->dirty_min = 0; 15907d61bfe8SRoman Pen vb->dirty_max = VMAP_BBMAP_BITS; 159102b709dfSNick Piggin spin_lock(&vbq->lock); 159202b709dfSNick Piggin list_del_rcu(&vb->free_list); 159302b709dfSNick Piggin spin_unlock(&vbq->lock); 159402b709dfSNick Piggin spin_unlock(&vb->lock); 159502b709dfSNick Piggin list_add_tail(&vb->purge, &purge); 159602b709dfSNick Piggin } else 159702b709dfSNick Piggin spin_unlock(&vb->lock); 159802b709dfSNick Piggin } 159902b709dfSNick Piggin rcu_read_unlock(); 160002b709dfSNick Piggin 160102b709dfSNick Piggin list_for_each_entry_safe(vb, n_vb, &purge, purge) { 160202b709dfSNick Piggin list_del(&vb->purge); 160302b709dfSNick Piggin free_vmap_block(vb); 160402b709dfSNick Piggin } 160502b709dfSNick Piggin } 160602b709dfSNick Piggin 160702b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void) 160802b709dfSNick Piggin { 160902b709dfSNick Piggin int cpu; 161002b709dfSNick Piggin 161102b709dfSNick Piggin for_each_possible_cpu(cpu) 161202b709dfSNick Piggin purge_fragmented_blocks(cpu); 161302b709dfSNick Piggin } 161402b709dfSNick Piggin 1615db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 1616db64fe02SNick Piggin { 1617db64fe02SNick Piggin struct vmap_block_queue *vbq; 1618db64fe02SNick 
Piggin struct vmap_block *vb; 1619cf725ce2SRoman Pen void *vaddr = NULL; 1620db64fe02SNick Piggin unsigned int order; 1621db64fe02SNick Piggin 1622891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 1623db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 1624aa91c4d8SJan Kara if (WARN_ON(size == 0)) { 1625aa91c4d8SJan Kara /* 1626aa91c4d8SJan Kara * Allocating 0 bytes isn't what caller wants since 1627aa91c4d8SJan Kara * get_order(0) returns funny result. Just warn and terminate 1628aa91c4d8SJan Kara * early. 1629aa91c4d8SJan Kara */ 1630aa91c4d8SJan Kara return NULL; 1631aa91c4d8SJan Kara } 1632db64fe02SNick Piggin order = get_order(size); 1633db64fe02SNick Piggin 1634db64fe02SNick Piggin rcu_read_lock(); 1635db64fe02SNick Piggin vbq = &get_cpu_var(vmap_block_queue); 1636db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1637cf725ce2SRoman Pen unsigned long pages_off; 1638db64fe02SNick Piggin 1639db64fe02SNick Piggin spin_lock(&vb->lock); 1640cf725ce2SRoman Pen if (vb->free < (1UL << order)) { 1641cf725ce2SRoman Pen spin_unlock(&vb->lock); 1642cf725ce2SRoman Pen continue; 1643cf725ce2SRoman Pen } 164402b709dfSNick Piggin 1645cf725ce2SRoman Pen pages_off = VMAP_BBMAP_BITS - vb->free; 1646cf725ce2SRoman Pen vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 1647db64fe02SNick Piggin vb->free -= 1UL << order; 1648db64fe02SNick Piggin if (vb->free == 0) { 1649db64fe02SNick Piggin spin_lock(&vbq->lock); 1650de560423SNick Piggin list_del_rcu(&vb->free_list); 1651db64fe02SNick Piggin spin_unlock(&vbq->lock); 1652db64fe02SNick Piggin } 1653cf725ce2SRoman Pen 1654db64fe02SNick Piggin spin_unlock(&vb->lock); 1655db64fe02SNick Piggin break; 1656db64fe02SNick Piggin } 165702b709dfSNick Piggin 16583f04ba85STejun Heo put_cpu_var(vmap_block_queue); 1659db64fe02SNick Piggin rcu_read_unlock(); 1660db64fe02SNick Piggin 1661cf725ce2SRoman Pen /* Allocate new block if nothing was found */ 1662cf725ce2SRoman Pen if (!vaddr) 1663cf725ce2SRoman Pen vaddr = new_vmap_block(order, gfp_mask); 1664db64fe02SNick Piggin 1665cf725ce2SRoman Pen return vaddr; 1666db64fe02SNick Piggin } 1667db64fe02SNick Piggin 1668db64fe02SNick Piggin static void vb_free(const void *addr, unsigned long size) 1669db64fe02SNick Piggin { 1670db64fe02SNick Piggin unsigned long offset; 1671db64fe02SNick Piggin unsigned long vb_idx; 1672db64fe02SNick Piggin unsigned int order; 1673db64fe02SNick Piggin struct vmap_block *vb; 1674db64fe02SNick Piggin 1675891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 1676db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 1677b29acbdcSNick Piggin 1678b29acbdcSNick Piggin flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size); 1679b29acbdcSNick Piggin 1680db64fe02SNick Piggin order = get_order(size); 1681db64fe02SNick Piggin 1682db64fe02SNick Piggin offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1); 16837d61bfe8SRoman Pen offset >>= PAGE_SHIFT; 1684db64fe02SNick Piggin 1685db64fe02SNick Piggin vb_idx = addr_to_vb_idx((unsigned long)addr); 1686db64fe02SNick Piggin rcu_read_lock(); 1687db64fe02SNick Piggin vb = radix_tree_lookup(&vmap_block_tree, vb_idx); 1688db64fe02SNick Piggin rcu_read_unlock(); 1689db64fe02SNick Piggin BUG_ON(!vb); 1690db64fe02SNick Piggin 169164141da5SJeremy Fitzhardinge vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); 169264141da5SJeremy Fitzhardinge 16938e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 169482a2e924SChintan Pandya flush_tlb_kernel_range((unsigned long)addr, 
169582a2e924SChintan Pandya (unsigned long)addr + size); 169682a2e924SChintan Pandya 1697db64fe02SNick Piggin spin_lock(&vb->lock); 16987d61bfe8SRoman Pen 16997d61bfe8SRoman Pen /* Expand dirty range */ 17007d61bfe8SRoman Pen vb->dirty_min = min(vb->dirty_min, offset); 17017d61bfe8SRoman Pen vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 1702d086817dSMinChan Kim 1703db64fe02SNick Piggin vb->dirty += 1UL << order; 1704db64fe02SNick Piggin if (vb->dirty == VMAP_BBMAP_BITS) { 1705de560423SNick Piggin BUG_ON(vb->free); 1706db64fe02SNick Piggin spin_unlock(&vb->lock); 1707db64fe02SNick Piggin free_vmap_block(vb); 1708db64fe02SNick Piggin } else 1709db64fe02SNick Piggin spin_unlock(&vb->lock); 1710db64fe02SNick Piggin } 1711db64fe02SNick Piggin 1712868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 1713db64fe02SNick Piggin { 1714db64fe02SNick Piggin int cpu; 1715db64fe02SNick Piggin 17169b463334SJeremy Fitzhardinge if (unlikely(!vmap_initialized)) 17179b463334SJeremy Fitzhardinge return; 17189b463334SJeremy Fitzhardinge 17195803ed29SChristoph Hellwig might_sleep(); 17205803ed29SChristoph Hellwig 1721db64fe02SNick Piggin for_each_possible_cpu(cpu) { 1722db64fe02SNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 1723db64fe02SNick Piggin struct vmap_block *vb; 1724db64fe02SNick Piggin 1725db64fe02SNick Piggin rcu_read_lock(); 1726db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1727db64fe02SNick Piggin spin_lock(&vb->lock); 17287d61bfe8SRoman Pen if (vb->dirty) { 17297d61bfe8SRoman Pen unsigned long va_start = vb->va->va_start; 1730db64fe02SNick Piggin unsigned long s, e; 1731b136be5eSJoonsoo Kim 17327d61bfe8SRoman Pen s = va_start + (vb->dirty_min << PAGE_SHIFT); 17337d61bfe8SRoman Pen e = va_start + (vb->dirty_max << PAGE_SHIFT); 1734db64fe02SNick Piggin 17357d61bfe8SRoman Pen start = min(s, start); 17367d61bfe8SRoman Pen end = max(e, end); 17377d61bfe8SRoman Pen 1738db64fe02SNick Piggin flush = 1; 1739db64fe02SNick Piggin } 1740db64fe02SNick Piggin spin_unlock(&vb->lock); 1741db64fe02SNick Piggin } 1742db64fe02SNick Piggin rcu_read_unlock(); 1743db64fe02SNick Piggin } 1744db64fe02SNick Piggin 1745f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 17460574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 17470574ecd1SChristoph Hellwig if (!__purge_vmap_area_lazy(start, end) && flush) 17480574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 1749f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 1750db64fe02SNick Piggin } 1751868b104dSRick Edgecombe 1752868b104dSRick Edgecombe /** 1753868b104dSRick Edgecombe * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 1754868b104dSRick Edgecombe * 1755868b104dSRick Edgecombe * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 1756868b104dSRick Edgecombe * to amortize TLB flushing overheads. What this means is that any page you 1757868b104dSRick Edgecombe * have now, may, in a former life, have been mapped into kernel virtual 1758868b104dSRick Edgecombe * address by the vmap layer and so there might be some CPUs with TLB entries 1759868b104dSRick Edgecombe * still referencing that page (additional to the regular 1:1 kernel mapping). 1760868b104dSRick Edgecombe * 1761868b104dSRick Edgecombe * vm_unmap_aliases flushes all such lazy mappings. 
After it returns, we can 1762868b104dSRick Edgecombe * be sure that none of the pages we have control over will have any aliases 1763868b104dSRick Edgecombe * from the vmap layer. 1764868b104dSRick Edgecombe */ 1765868b104dSRick Edgecombe void vm_unmap_aliases(void) 1766868b104dSRick Edgecombe { 1767868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 1768868b104dSRick Edgecombe int flush = 0; 1769868b104dSRick Edgecombe 1770868b104dSRick Edgecombe _vm_unmap_aliases(start, end, flush); 1771868b104dSRick Edgecombe } 1772db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases); 1773db64fe02SNick Piggin 1774db64fe02SNick Piggin /** 1775db64fe02SNick Piggin * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 1776db64fe02SNick Piggin * @mem: the pointer returned by vm_map_ram 1777db64fe02SNick Piggin * @count: the count passed to that vm_map_ram call (cannot unmap partial) 1778db64fe02SNick Piggin */ 1779db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count) 1780db64fe02SNick Piggin { 178165ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 1782db64fe02SNick Piggin unsigned long addr = (unsigned long)mem; 17839c3acf60SChristoph Hellwig struct vmap_area *va; 1784db64fe02SNick Piggin 17855803ed29SChristoph Hellwig might_sleep(); 1786db64fe02SNick Piggin BUG_ON(!addr); 1787db64fe02SNick Piggin BUG_ON(addr < VMALLOC_START); 1788db64fe02SNick Piggin BUG_ON(addr > VMALLOC_END); 1789a1c0b1a0SShawn Lin BUG_ON(!PAGE_ALIGNED(addr)); 1790db64fe02SNick Piggin 1791d98c9e83SAndrey Ryabinin kasan_poison_vmalloc(mem, size); 1792d98c9e83SAndrey Ryabinin 17939c3acf60SChristoph Hellwig if (likely(count <= VMAP_MAX_ALLOC)) { 179405e3ff95SChintan Pandya debug_check_no_locks_freed(mem, size); 1795db64fe02SNick Piggin vb_free(mem, size); 17969c3acf60SChristoph Hellwig return; 17979c3acf60SChristoph Hellwig } 17989c3acf60SChristoph Hellwig 17999c3acf60SChristoph Hellwig va = find_vmap_area(addr); 18009c3acf60SChristoph Hellwig BUG_ON(!va); 180105e3ff95SChintan Pandya debug_check_no_locks_freed((void *)va->va_start, 180205e3ff95SChintan Pandya (va->va_end - va->va_start)); 18039c3acf60SChristoph Hellwig free_unmap_vmap_area(va); 1804db64fe02SNick Piggin } 1805db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram); 1806db64fe02SNick Piggin 1807db64fe02SNick Piggin /** 1808db64fe02SNick Piggin * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 1809db64fe02SNick Piggin * @pages: an array of pointers to the pages to be mapped 1810db64fe02SNick Piggin * @count: number of pages 1811db64fe02SNick Piggin * @node: prefer to allocate data structures on this node 1812db64fe02SNick Piggin * @prot: memory protection to use. PAGE_KERNEL for regular RAM 1813e99c97adSRandy Dunlap * 181436437638SGioh Kim * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 181536437638SGioh Kim * faster than vmap so it's good. But if you mix long-life and short-life 181636437638SGioh Kim * objects with vm_map_ram(), it could consume lots of address space through 181736437638SGioh Kim * fragmentation (especially on a 32bit machine). You could see failures in 181836437638SGioh Kim * the end. Please use this function for short-lived objects. 
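 * (An illustrative, assumed usage pattern: map a few scratch pages with
 * mem = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);, use the linear
 * mapping, then call vm_unmap_ram(mem, nr); as soon as the mapping is no
 * longer needed.)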
181936437638SGioh Kim * 1820e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure 1821db64fe02SNick Piggin */ 1822db64fe02SNick Piggin void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) 1823db64fe02SNick Piggin { 182465ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 1825db64fe02SNick Piggin unsigned long addr; 1826db64fe02SNick Piggin void *mem; 1827db64fe02SNick Piggin 1828db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) { 1829db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL); 1830db64fe02SNick Piggin if (IS_ERR(mem)) 1831db64fe02SNick Piggin return NULL; 1832db64fe02SNick Piggin addr = (unsigned long)mem; 1833db64fe02SNick Piggin } else { 1834db64fe02SNick Piggin struct vmap_area *va; 1835db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE, 1836db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); 1837db64fe02SNick Piggin if (IS_ERR(va)) 1838db64fe02SNick Piggin return NULL; 1839db64fe02SNick Piggin 1840db64fe02SNick Piggin addr = va->va_start; 1841db64fe02SNick Piggin mem = (void *)addr; 1842db64fe02SNick Piggin } 1843d98c9e83SAndrey Ryabinin 1844d98c9e83SAndrey Ryabinin kasan_unpoison_vmalloc(mem, size); 1845d98c9e83SAndrey Ryabinin 1846db64fe02SNick Piggin if (vmap_page_range(addr, addr + size, prot, pages) < 0) { 1847db64fe02SNick Piggin vm_unmap_ram(mem, count); 1848db64fe02SNick Piggin return NULL; 1849db64fe02SNick Piggin } 1850db64fe02SNick Piggin return mem; 1851db64fe02SNick Piggin } 1852db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram); 1853db64fe02SNick Piggin 18544341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata; 185592eac168SMike Rapoport 1856f0aa6617STejun Heo /** 1857be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot 1858be9b7335SNicolas Pitre * @vm: vm_struct to add 1859be9b7335SNicolas Pitre * 1860be9b7335SNicolas Pitre * This function is used to add fixed kernel vm area to vmlist before 1861be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 1862be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero. 1863be9b7335SNicolas Pitre * 1864be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1865be9b7335SNicolas Pitre */ 1866be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm) 1867be9b7335SNicolas Pitre { 1868be9b7335SNicolas Pitre struct vm_struct *tmp, **p; 1869be9b7335SNicolas Pitre 1870be9b7335SNicolas Pitre BUG_ON(vmap_initialized); 1871be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 1872be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) { 1873be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size); 1874be9b7335SNicolas Pitre break; 1875be9b7335SNicolas Pitre } else 1876be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr); 1877be9b7335SNicolas Pitre } 1878be9b7335SNicolas Pitre vm->next = *p; 1879be9b7335SNicolas Pitre *p = vm; 1880be9b7335SNicolas Pitre } 1881be9b7335SNicolas Pitre 1882be9b7335SNicolas Pitre /** 1883f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot 1884f0aa6617STejun Heo * @vm: vm_struct to register 1885c0c0a293STejun Heo * @align: requested alignment 1886f0aa6617STejun Heo * 1887f0aa6617STejun Heo * This function is used to register kernel vm area before 1888f0aa6617STejun Heo * vmalloc_init() is called. 
@vm->size and @vm->flags should contain 1889f0aa6617STejun Heo * proper values on entry and other fields should be zero. On return, 1890f0aa6617STejun Heo * vm->addr contains the allocated address. 1891f0aa6617STejun Heo * 1892f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1893f0aa6617STejun Heo */ 1894c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align) 1895f0aa6617STejun Heo { 1896f0aa6617STejun Heo static size_t vm_init_off __initdata; 1897c0c0a293STejun Heo unsigned long addr; 1898f0aa6617STejun Heo 1899c0c0a293STejun Heo addr = ALIGN(VMALLOC_START + vm_init_off, align); 1900c0c0a293STejun Heo vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; 1901c0c0a293STejun Heo 1902c0c0a293STejun Heo vm->addr = (void *)addr; 1903f0aa6617STejun Heo 1904be9b7335SNicolas Pitre vm_area_add_early(vm); 1905f0aa6617STejun Heo } 1906f0aa6617STejun Heo 190768ad4a33SUladzislau Rezki (Sony) static void vmap_init_free_space(void) 190868ad4a33SUladzislau Rezki (Sony) { 190968ad4a33SUladzislau Rezki (Sony) unsigned long vmap_start = 1; 191068ad4a33SUladzislau Rezki (Sony) const unsigned long vmap_end = ULONG_MAX; 191168ad4a33SUladzislau Rezki (Sony) struct vmap_area *busy, *free; 191268ad4a33SUladzislau Rezki (Sony) 191368ad4a33SUladzislau Rezki (Sony) /* 191468ad4a33SUladzislau Rezki (Sony) * B F B B B F 191568ad4a33SUladzislau Rezki (Sony) * -|-----|.....|-----|-----|-----|.....|- 191668ad4a33SUladzislau Rezki (Sony) * | The KVA space | 191768ad4a33SUladzislau Rezki (Sony) * |<--------------------------------->| 191868ad4a33SUladzislau Rezki (Sony) */ 191968ad4a33SUladzislau Rezki (Sony) list_for_each_entry(busy, &vmap_area_list, list) { 192068ad4a33SUladzislau Rezki (Sony) if (busy->va_start - vmap_start > 0) { 192168ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 192268ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 192368ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 192468ad4a33SUladzislau Rezki (Sony) free->va_end = busy->va_start; 192568ad4a33SUladzislau Rezki (Sony) 192668ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 192768ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 192868ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 192968ad4a33SUladzislau Rezki (Sony) } 193068ad4a33SUladzislau Rezki (Sony) } 193168ad4a33SUladzislau Rezki (Sony) 193268ad4a33SUladzislau Rezki (Sony) vmap_start = busy->va_end; 193368ad4a33SUladzislau Rezki (Sony) } 193468ad4a33SUladzislau Rezki (Sony) 193568ad4a33SUladzislau Rezki (Sony) if (vmap_end - vmap_start > 0) { 193668ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 193768ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 193868ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 193968ad4a33SUladzislau Rezki (Sony) free->va_end = vmap_end; 194068ad4a33SUladzislau Rezki (Sony) 194168ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 194268ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 194368ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 194468ad4a33SUladzislau Rezki (Sony) } 194568ad4a33SUladzislau Rezki (Sony) } 194668ad4a33SUladzislau Rezki (Sony) } 194768ad4a33SUladzislau Rezki (Sony) 1948db64fe02SNick Piggin void __init vmalloc_init(void) 1949db64fe02SNick Piggin { 1950822c18f2SIvan Kokshaysky struct vmap_area *va; 1951822c18f2SIvan Kokshaysky struct vm_struct *tmp; 1952db64fe02SNick Piggin int i; 1953db64fe02SNick 
Piggin 195468ad4a33SUladzislau Rezki (Sony) /* 195568ad4a33SUladzislau Rezki (Sony) * Create the cache for vmap_area objects. 195668ad4a33SUladzislau Rezki (Sony) */ 195768ad4a33SUladzislau Rezki (Sony) vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 195868ad4a33SUladzislau Rezki (Sony) 1959db64fe02SNick Piggin for_each_possible_cpu(i) { 1960db64fe02SNick Piggin struct vmap_block_queue *vbq; 196132fcfd40SAl Viro struct vfree_deferred *p; 1962db64fe02SNick Piggin 1963db64fe02SNick Piggin vbq = &per_cpu(vmap_block_queue, i); 1964db64fe02SNick Piggin spin_lock_init(&vbq->lock); 1965db64fe02SNick Piggin INIT_LIST_HEAD(&vbq->free); 196632fcfd40SAl Viro p = &per_cpu(vfree_deferred, i); 196732fcfd40SAl Viro init_llist_head(&p->list); 196832fcfd40SAl Viro INIT_WORK(&p->wq, free_work); 1969db64fe02SNick Piggin } 19709b463334SJeremy Fitzhardinge 1971822c18f2SIvan Kokshaysky /* Import existing vmlist entries. */ 1972822c18f2SIvan Kokshaysky for (tmp = vmlist; tmp; tmp = tmp->next) { 197368ad4a33SUladzislau Rezki (Sony) va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 197468ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!va)) 197568ad4a33SUladzislau Rezki (Sony) continue; 197668ad4a33SUladzislau Rezki (Sony) 1977822c18f2SIvan Kokshaysky va->va_start = (unsigned long)tmp->addr; 1978822c18f2SIvan Kokshaysky va->va_end = va->va_start + tmp->size; 1979dbda591dSKyongHo va->vm = tmp; 198068ad4a33SUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 1981822c18f2SIvan Kokshaysky } 1982ca23e405STejun Heo 198368ad4a33SUladzislau Rezki (Sony) /* 198468ad4a33SUladzislau Rezki (Sony) * Now we can initialize a free vmap space. 198568ad4a33SUladzislau Rezki (Sony) */ 198668ad4a33SUladzislau Rezki (Sony) vmap_init_free_space(); 19879b463334SJeremy Fitzhardinge vmap_initialized = true; 1988db64fe02SNick Piggin } 1989db64fe02SNick Piggin 19908fc48985STejun Heo /** 19918fc48985STejun Heo * map_kernel_range_noflush - map kernel VM area with the specified pages 19928fc48985STejun Heo * @addr: start of the VM area to map 19938fc48985STejun Heo * @size: size of the VM area to map 19948fc48985STejun Heo * @prot: page protection flags to use 19958fc48985STejun Heo * @pages: pages to map 19968fc48985STejun Heo * 19978fc48985STejun Heo * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size 19988fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 19998fc48985STejun Heo * friends. 20008fc48985STejun Heo * 20018fc48985STejun Heo * NOTE: 20028fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 20038fc48985STejun Heo * responsible for calling flush_cache_vmap() on to-be-mapped areas 20048fc48985STejun Heo * before calling this function. 20058fc48985STejun Heo * 20068fc48985STejun Heo * RETURNS: 20078fc48985STejun Heo * The number of pages mapped on success, -errno on failure. 20088fc48985STejun Heo */ 20098fc48985STejun Heo int map_kernel_range_noflush(unsigned long addr, unsigned long size, 20108fc48985STejun Heo pgprot_t prot, struct page **pages) 20118fc48985STejun Heo { 20128fc48985STejun Heo return vmap_page_range_noflush(addr, addr + size, prot, pages); 20138fc48985STejun Heo } 20148fc48985STejun Heo 20158fc48985STejun Heo /** 20168fc48985STejun Heo * unmap_kernel_range_noflush - unmap kernel VM area 20178fc48985STejun Heo * @addr: start of the VM area to unmap 20188fc48985STejun Heo * @size: size of the VM area to unmap 20198fc48985STejun Heo * 20208fc48985STejun Heo * Unmap PFN_UP(@size) pages at @addr. 
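 * (PFN_UP() rounds up, so an assumed @size of 3 * PAGE_SIZE + 1, for
 * instance, unmaps 4 pages.)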
The VM area @addr and @size 20218fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 20228fc48985STejun Heo * friends. 20238fc48985STejun Heo * 20248fc48985STejun Heo * NOTE: 20258fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 20268fc48985STejun Heo * responsible for calling flush_cache_vunmap() on to-be-mapped areas 20278fc48985STejun Heo * before calling this function and flush_tlb_kernel_range() after. 20288fc48985STejun Heo */ 20298fc48985STejun Heo void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) 20308fc48985STejun Heo { 20318fc48985STejun Heo vunmap_page_range(addr, addr + size); 20328fc48985STejun Heo } 203381e88fdcSHuang Ying EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush); 20348fc48985STejun Heo 20358fc48985STejun Heo /** 20368fc48985STejun Heo * unmap_kernel_range - unmap kernel VM area and flush cache and TLB 20378fc48985STejun Heo * @addr: start of the VM area to unmap 20388fc48985STejun Heo * @size: size of the VM area to unmap 20398fc48985STejun Heo * 20408fc48985STejun Heo * Similar to unmap_kernel_range_noflush() but flushes vcache before 20418fc48985STejun Heo * the unmapping and tlb after. 20428fc48985STejun Heo */ 2043db64fe02SNick Piggin void unmap_kernel_range(unsigned long addr, unsigned long size) 2044db64fe02SNick Piggin { 2045db64fe02SNick Piggin unsigned long end = addr + size; 2046f6fcba70STejun Heo 2047f6fcba70STejun Heo flush_cache_vunmap(addr, end); 2048db64fe02SNick Piggin vunmap_page_range(addr, end); 2049db64fe02SNick Piggin flush_tlb_kernel_range(addr, end); 2050db64fe02SNick Piggin } 205193ef6d6cSMinchan Kim EXPORT_SYMBOL_GPL(unmap_kernel_range); 2052db64fe02SNick Piggin 2053f6f8ed47SWANG Chao int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) 2054db64fe02SNick Piggin { 2055db64fe02SNick Piggin unsigned long addr = (unsigned long)area->addr; 2056762216abSWanpeng Li unsigned long end = addr + get_vm_area_size(area); 2057db64fe02SNick Piggin int err; 2058db64fe02SNick Piggin 2059f6f8ed47SWANG Chao err = vmap_page_range(addr, end, prot, pages); 2060db64fe02SNick Piggin 2061f6f8ed47SWANG Chao return err > 0 ? 
0 : err; 2062db64fe02SNick Piggin } 2063db64fe02SNick Piggin EXPORT_SYMBOL_GPL(map_vm_area); 2064db64fe02SNick Piggin 2065e36176beSUladzislau Rezki (Sony) static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, 2066e36176beSUladzislau Rezki (Sony) struct vmap_area *va, unsigned long flags, const void *caller) 2067cf88c790STejun Heo { 2068cf88c790STejun Heo vm->flags = flags; 2069cf88c790STejun Heo vm->addr = (void *)va->va_start; 2070cf88c790STejun Heo vm->size = va->va_end - va->va_start; 2071cf88c790STejun Heo vm->caller = caller; 2072db1aecafSMinchan Kim va->vm = vm; 2073e36176beSUladzislau Rezki (Sony) } 2074e36176beSUladzislau Rezki (Sony) 2075e36176beSUladzislau Rezki (Sony) static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 2076e36176beSUladzislau Rezki (Sony) unsigned long flags, const void *caller) 2077e36176beSUladzislau Rezki (Sony) { 2078e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 2079e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vm, va, flags, caller); 2080c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 2081f5252e00SMitsuo Hayasaka } 2082cf88c790STejun Heo 208320fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm) 2084f5252e00SMitsuo Hayasaka { 2085d4033afdSJoonsoo Kim /* 208620fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED, 2087d4033afdSJoonsoo Kim * we should make sure that vm has proper values. 2088d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info(). 2089d4033afdSJoonsoo Kim */ 2090d4033afdSJoonsoo Kim smp_wmb(); 209120fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED; 2092cf88c790STejun Heo } 2093cf88c790STejun Heo 2094db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size, 20952dca6999SDavid Miller unsigned long align, unsigned long flags, unsigned long start, 20965e6cafc8SMarek Szyprowski unsigned long end, int node, gfp_t gfp_mask, const void *caller) 2097db64fe02SNick Piggin { 20980006526dSKautuk Consul struct vmap_area *va; 2099db64fe02SNick Piggin struct vm_struct *area; 2100d98c9e83SAndrey Ryabinin unsigned long requested_size = size; 21011da177e4SLinus Torvalds 210252fd24caSGiridhar Pemmasani BUG_ON(in_interrupt()); 21031da177e4SLinus Torvalds size = PAGE_ALIGN(size); 210431be8309SOGAWA Hirofumi if (unlikely(!size)) 210531be8309SOGAWA Hirofumi return NULL; 21061da177e4SLinus Torvalds 2107252e5c6eSzijun_hu if (flags & VM_IOREMAP) 2108252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size), 2109252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER); 2110252e5c6eSzijun_hu 2111cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 21121da177e4SLinus Torvalds if (unlikely(!area)) 21131da177e4SLinus Torvalds return NULL; 21141da177e4SLinus Torvalds 211571394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD)) 21161da177e4SLinus Torvalds size += PAGE_SIZE; 21171da177e4SLinus Torvalds 2118db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask); 2119db64fe02SNick Piggin if (IS_ERR(va)) { 2120db64fe02SNick Piggin kfree(area); 2121db64fe02SNick Piggin return NULL; 21221da177e4SLinus Torvalds } 21231da177e4SLinus Torvalds 2124d98c9e83SAndrey Ryabinin kasan_unpoison_vmalloc((void *)va->va_start, requested_size); 2125f5252e00SMitsuo Hayasaka 2126d98c9e83SAndrey Ryabinin setup_vmalloc_vm(area, va, flags, caller); 21273c5c3cfbSDaniel Axtens 21281da177e4SLinus Torvalds return area; 21291da177e4SLinus Torvalds } 21301da177e4SLinus Torvalds 2131930fc45aSChristoph Lameter struct 
vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 2132930fc45aSChristoph Lameter unsigned long start, unsigned long end) 2133930fc45aSChristoph Lameter { 213400ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, 213500ef2d2fSDavid Rientjes GFP_KERNEL, __builtin_return_address(0)); 2136930fc45aSChristoph Lameter } 21375992b6daSRusty Russell EXPORT_SYMBOL_GPL(__get_vm_area); 2138930fc45aSChristoph Lameter 2139c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 2140c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end, 21415e6cafc8SMarek Szyprowski const void *caller) 2142c2968612SBenjamin Herrenschmidt { 214300ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, 214400ef2d2fSDavid Rientjes GFP_KERNEL, caller); 2145c2968612SBenjamin Herrenschmidt } 2146c2968612SBenjamin Herrenschmidt 21471da177e4SLinus Torvalds /** 2148183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area 21491da177e4SLinus Torvalds * @size: size of the area 21501da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 21511da177e4SLinus Torvalds * 21521da177e4SLinus Torvalds * Search an area of @size in the kernel virtual mapping area, 21531da177e4SLinus Torvalds * and reserve it for our purposes. Returns the area descriptor 21541da177e4SLinus Torvalds * on success or %NULL on failure. 2155a862f68aSMike Rapoport * 2156a862f68aSMike Rapoport * Return: the area descriptor on success or %NULL on failure. 21571da177e4SLinus Torvalds */ 21581da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 21591da177e4SLinus Torvalds { 21602dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, 216100ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, 216200ef2d2fSDavid Rientjes __builtin_return_address(0)); 216323016969SChristoph Lameter } 216423016969SChristoph Lameter 216523016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 21665e6cafc8SMarek Szyprowski const void *caller) 216723016969SChristoph Lameter { 21682dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, 216900ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, caller); 21701da177e4SLinus Torvalds } 21711da177e4SLinus Torvalds 2172e9da6e99SMarek Szyprowski /** 2173e9da6e99SMarek Szyprowski * find_vm_area - find a continuous kernel virtual area 2174e9da6e99SMarek Szyprowski * @addr: base address 2175e9da6e99SMarek Szyprowski * 2176e9da6e99SMarek Szyprowski * Search for the kernel VM area starting at @addr, and return it. 2177e9da6e99SMarek Szyprowski * It is up to the caller to do all required locking to keep the returned 2178e9da6e99SMarek Szyprowski * pointer valid.
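 * (The lookup itself is serialized internally: find_vmap_area() takes
 * vmap_area_lock around the tree walk. Only the lifetime of the returned
 * vm_struct is left to the caller.)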
2179a862f68aSMike Rapoport * 2180a862f68aSMike Rapoport * Return: pointer to the found area or %NULL on failure 2181e9da6e99SMarek Szyprowski */ 2182e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr) 218383342314SNick Piggin { 2184db64fe02SNick Piggin struct vmap_area *va; 218583342314SNick Piggin 2186db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 2187688fcbfcSPengfei Li if (!va) 21887856dfebSAndi Kleen return NULL; 2189688fcbfcSPengfei Li 2190688fcbfcSPengfei Li return va->vm; 21917856dfebSAndi Kleen } 21927856dfebSAndi Kleen 21931da177e4SLinus Torvalds /** 2194183ff22bSSimon Arlott * remove_vm_area - find and remove a continuous kernel virtual area 21951da177e4SLinus Torvalds * @addr: base address 21961da177e4SLinus Torvalds * 21971da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it. 21981da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe 21997856dfebSAndi Kleen * on SMP machines, except for its size or flags. 2200a862f68aSMike Rapoport * 2201a862f68aSMike Rapoport * Return: pointer to the found area or %NULL on failure 22021da177e4SLinus Torvalds */ 2203b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 22041da177e4SLinus Torvalds { 2205db64fe02SNick Piggin struct vmap_area *va; 2206db64fe02SNick Piggin 22075803ed29SChristoph Hellwig might_sleep(); 22085803ed29SChristoph Hellwig 2209dd3b8353SUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 2210dd3b8353SUladzislau Rezki (Sony) va = __find_vmap_area((unsigned long)addr); 2211688fcbfcSPengfei Li if (va && va->vm) { 2212db1aecafSMinchan Kim struct vm_struct *vm = va->vm; 2213f5252e00SMitsuo Hayasaka 2214c69480adSJoonsoo Kim va->vm = NULL; 2215c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 2216c69480adSJoonsoo Kim 2217a5af5aa8SAndrey Ryabinin kasan_free_shadow(vm); 2218dd32c279SKAMEZAWA Hiroyuki free_unmap_vmap_area(va); 2219dd32c279SKAMEZAWA Hiroyuki 2220db64fe02SNick Piggin return vm; 2221db64fe02SNick Piggin } 2222dd3b8353SUladzislau Rezki (Sony) 2223dd3b8353SUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 2224db64fe02SNick Piggin return NULL; 22251da177e4SLinus Torvalds } 22261da177e4SLinus Torvalds 2227868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area, 2228868b104dSRick Edgecombe int (*set_direct_map)(struct page *page)) 2229868b104dSRick Edgecombe { 2230868b104dSRick Edgecombe int i; 2231868b104dSRick Edgecombe 2232868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) 2233868b104dSRick Edgecombe if (page_address(area->pages[i])) 2234868b104dSRick Edgecombe set_direct_map(area->pages[i]); 2235868b104dSRick Edgecombe } 2236868b104dSRick Edgecombe 2237868b104dSRick Edgecombe /* Handle removing and resetting vm mappings related to the vm_struct. */ 2238868b104dSRick Edgecombe static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 2239868b104dSRick Edgecombe { 2240868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 2241868b104dSRick Edgecombe int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 224231e67340SRick Edgecombe int flush_dmap = 0; 2243868b104dSRick Edgecombe int i; 2244868b104dSRick Edgecombe 2245868b104dSRick Edgecombe remove_vm_area(area->addr); 2246868b104dSRick Edgecombe 2247868b104dSRick Edgecombe /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below.
*/ 2248868b104dSRick Edgecombe if (!flush_reset) 2249868b104dSRick Edgecombe return; 2250868b104dSRick Edgecombe 2251868b104dSRick Edgecombe /* 2252868b104dSRick Edgecombe * If not deallocating pages, just do the flush of the VM area and 2253868b104dSRick Edgecombe * return. 2254868b104dSRick Edgecombe */ 2255868b104dSRick Edgecombe if (!deallocate_pages) { 2256868b104dSRick Edgecombe vm_unmap_aliases(); 2257868b104dSRick Edgecombe return; 2258868b104dSRick Edgecombe } 2259868b104dSRick Edgecombe 2260868b104dSRick Edgecombe /* 2261868b104dSRick Edgecombe * If execution gets here, flush the vm mapping and reset the direct 2262868b104dSRick Edgecombe * map. Find the start and end range of the direct mappings to make sure 2263868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map. 2264868b104dSRick Edgecombe */ 2265868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) { 22668e41f872SRick Edgecombe unsigned long addr = (unsigned long)page_address(area->pages[i]); 22678e41f872SRick Edgecombe if (addr) { 2268868b104dSRick Edgecombe start = min(addr, start); 22698e41f872SRick Edgecombe end = max(addr + PAGE_SIZE, end); 227031e67340SRick Edgecombe flush_dmap = 1; 2271868b104dSRick Edgecombe } 2272868b104dSRick Edgecombe } 2273868b104dSRick Edgecombe 2274868b104dSRick Edgecombe /* 2275868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if 2276868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and 2277868b104dSRick Edgecombe * reset the direct map permissions to the default. 2278868b104dSRick Edgecombe */ 2279868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush); 228031e67340SRick Edgecombe _vm_unmap_aliases(start, end, flush_dmap); 2281868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush); 2282868b104dSRick Edgecombe } 2283868b104dSRick Edgecombe 2284b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 22851da177e4SLinus Torvalds { 22861da177e4SLinus Torvalds struct vm_struct *area; 22871da177e4SLinus Torvalds 22881da177e4SLinus Torvalds if (!addr) 22891da177e4SLinus Torvalds return; 22901da177e4SLinus Torvalds 2291e69e9d4aSHATAYAMA Daisuke if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 2292ab15d9b4SDan Carpenter addr)) 22931da177e4SLinus Torvalds return; 22941da177e4SLinus Torvalds 22956ade2032SLiviu Dudau area = find_vm_area(addr); 22961da177e4SLinus Torvalds if (unlikely(!area)) { 22974c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 22981da177e4SLinus Torvalds addr); 22991da177e4SLinus Torvalds return; 23001da177e4SLinus Torvalds } 23011da177e4SLinus Torvalds 230205e3ff95SChintan Pandya debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); 230305e3ff95SChintan Pandya debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); 23049a11b49aSIngo Molnar 23053c5c3cfbSDaniel Axtens kasan_poison_vmalloc(area->addr, area->size); 23063c5c3cfbSDaniel Axtens 2307868b104dSRick Edgecombe vm_remove_mappings(area, deallocate_pages); 2308868b104dSRick Edgecombe 23091da177e4SLinus Torvalds if (deallocate_pages) { 23101da177e4SLinus Torvalds int i; 23111da177e4SLinus Torvalds 23121da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 2313bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 2314bf53d6f8SChristoph Lameter 2315bf53d6f8SChristoph Lameter BUG_ON(!page); 23164949148aSVladimir Davydov __free_pages(page, 0); 
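			/*
			 * Pages were allocated one at a time (order 0) in
			 * __vmalloc_area_node(), so they are freed the same
			 * way here; the nr_vmalloc_pages counter is adjusted
			 * once the loop has finished.
			 */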
23171da177e4SLinus Torvalds } 231897105f0aSRoman Gushchin atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); 23191da177e4SLinus Torvalds 2320244d63eeSDavid Rientjes kvfree(area->pages); 23211da177e4SLinus Torvalds } 23221da177e4SLinus Torvalds 23231da177e4SLinus Torvalds kfree(area); 23241da177e4SLinus Torvalds return; 23251da177e4SLinus Torvalds } 23261da177e4SLinus Torvalds 2327bf22e37aSAndrey Ryabinin static inline void __vfree_deferred(const void *addr) 2328bf22e37aSAndrey Ryabinin { 2329bf22e37aSAndrey Ryabinin /* 2330bf22e37aSAndrey Ryabinin * Use raw_cpu_ptr() because this can be called from preemptible 2331bf22e37aSAndrey Ryabinin * context. Preemption is absolutely fine here, because the llist_add() 2332bf22e37aSAndrey Ryabinin * implementation is lockless, so it works even if we are adding to 2333bf22e37aSAndrey Ryabinin * nother cpu's list. schedule_work() should be fine with this too. 2334bf22e37aSAndrey Ryabinin */ 2335bf22e37aSAndrey Ryabinin struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 2336bf22e37aSAndrey Ryabinin 2337bf22e37aSAndrey Ryabinin if (llist_add((struct llist_node *)addr, &p->list)) 2338bf22e37aSAndrey Ryabinin schedule_work(&p->wq); 2339bf22e37aSAndrey Ryabinin } 2340bf22e37aSAndrey Ryabinin 2341bf22e37aSAndrey Ryabinin /** 2342bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc() 2343bf22e37aSAndrey Ryabinin * @addr: memory base address 2344bf22e37aSAndrey Ryabinin * 2345bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context 2346bf22e37aSAndrey Ryabinin * except NMIs. 2347bf22e37aSAndrey Ryabinin */ 2348bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr) 2349bf22e37aSAndrey Ryabinin { 2350bf22e37aSAndrey Ryabinin BUG_ON(in_nmi()); 2351bf22e37aSAndrey Ryabinin 2352bf22e37aSAndrey Ryabinin kmemleak_free(addr); 2353bf22e37aSAndrey Ryabinin 2354bf22e37aSAndrey Ryabinin if (!addr) 2355bf22e37aSAndrey Ryabinin return; 2356bf22e37aSAndrey Ryabinin __vfree_deferred(addr); 2357bf22e37aSAndrey Ryabinin } 2358bf22e37aSAndrey Ryabinin 2359c67dc624SRoman Penyaev static void __vfree(const void *addr) 2360c67dc624SRoman Penyaev { 2361c67dc624SRoman Penyaev if (unlikely(in_interrupt())) 2362c67dc624SRoman Penyaev __vfree_deferred(addr); 2363c67dc624SRoman Penyaev else 2364c67dc624SRoman Penyaev __vunmap(addr, 1); 2365c67dc624SRoman Penyaev } 2366c67dc624SRoman Penyaev 23671da177e4SLinus Torvalds /** 23681da177e4SLinus Torvalds * vfree - release memory allocated by vmalloc() 23691da177e4SLinus Torvalds * @addr: memory base address 23701da177e4SLinus Torvalds * 2371183ff22bSSimon Arlott * Free the virtually continuous memory area starting at @addr, as 237280e93effSPekka Enberg * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is 237380e93effSPekka Enberg * NULL, no operation is performed. 23741da177e4SLinus Torvalds * 237532fcfd40SAl Viro * Must not be called in NMI context (strictly speaking, only if we don't 237632fcfd40SAl Viro * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 237732fcfd40SAl Viro * conventions for vfree() arch-depenedent would be a really bad idea) 237832fcfd40SAl Viro * 23793ca4ea3aSAndrey Ryabinin * May sleep if called *not* from interrupt context. 
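 *
 * Example (editor's sketch, not from the original source; assumes an
 * ordinary process context and <linux/vmalloc.h>):
 *
 *	void *buf = vmalloc(64 * 1024);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 *
 * As noted above, passing a NULL pointer to vfree() is simply a no-op.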
23803ca4ea3aSAndrey Ryabinin * 23810e056eb5Smchehab@s-opensource.com * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node) 23821da177e4SLinus Torvalds */ 2383b3bdda02SChristoph Lameter void vfree(const void *addr) 23841da177e4SLinus Torvalds { 238532fcfd40SAl Viro BUG_ON(in_nmi()); 238689219d37SCatalin Marinas 238789219d37SCatalin Marinas kmemleak_free(addr); 238889219d37SCatalin Marinas 2389a8dda165SAndrey Ryabinin might_sleep_if(!in_interrupt()); 2390a8dda165SAndrey Ryabinin 239132fcfd40SAl Viro if (!addr) 239232fcfd40SAl Viro return; 2393c67dc624SRoman Penyaev 2394c67dc624SRoman Penyaev __vfree(addr); 23951da177e4SLinus Torvalds } 23961da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 23971da177e4SLinus Torvalds 23981da177e4SLinus Torvalds /** 23991da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 24001da177e4SLinus Torvalds * @addr: memory base address 24011da177e4SLinus Torvalds * 24021da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 24031da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 24041da177e4SLinus Torvalds * 240580e93effSPekka Enberg * Must not be called in interrupt context. 24061da177e4SLinus Torvalds */ 2407b3bdda02SChristoph Lameter void vunmap(const void *addr) 24081da177e4SLinus Torvalds { 24091da177e4SLinus Torvalds BUG_ON(in_interrupt()); 241034754b69SPeter Zijlstra might_sleep(); 241132fcfd40SAl Viro if (addr) 24121da177e4SLinus Torvalds __vunmap(addr, 0); 24131da177e4SLinus Torvalds } 24141da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 24151da177e4SLinus Torvalds 24161da177e4SLinus Torvalds /** 24171da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 24181da177e4SLinus Torvalds * @pages: array of page pointers 24191da177e4SLinus Torvalds * @count: number of pages to map 24201da177e4SLinus Torvalds * @flags: vm_area->flags 24211da177e4SLinus Torvalds * @prot: page protection for the mapping 24221da177e4SLinus Torvalds * 24231da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 24241da177e4SLinus Torvalds * space. 
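 *
 * Example (editor's sketch, not from the original source; the pages are
 * assumed to have been allocated elsewhere, e.g. with alloc_page()):
 *
 *	struct page *pages[4];
 *	void *va;
 *
 *	... fill pages[0..3] ...
 *	va = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		... use the contiguous mapping at va ...
 *		vunmap(va);
 *	}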
2425a862f68aSMike Rapoport * 2426a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 24271da177e4SLinus Torvalds */ 24281da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 24291da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 24301da177e4SLinus Torvalds { 24311da177e4SLinus Torvalds struct vm_struct *area; 243265ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */ 24331da177e4SLinus Torvalds 243434754b69SPeter Zijlstra might_sleep(); 243534754b69SPeter Zijlstra 2436ca79b0c2SArun KS if (count > totalram_pages()) 24371da177e4SLinus Torvalds return NULL; 24381da177e4SLinus Torvalds 243965ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT; 244065ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 24411da177e4SLinus Torvalds if (!area) 24421da177e4SLinus Torvalds return NULL; 244323016969SChristoph Lameter 2444f6f8ed47SWANG Chao if (map_vm_area(area, prot, pages)) { 24451da177e4SLinus Torvalds vunmap(area->addr); 24461da177e4SLinus Torvalds return NULL; 24471da177e4SLinus Torvalds } 24481da177e4SLinus Torvalds 24491da177e4SLinus Torvalds return area->addr; 24501da177e4SLinus Torvalds } 24511da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 24521da177e4SLinus Torvalds 24538594a21cSMichal Hocko static void *__vmalloc_node(unsigned long size, unsigned long align, 24548594a21cSMichal Hocko gfp_t gfp_mask, pgprot_t prot, 24558594a21cSMichal Hocko int node, const void *caller); 2456e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 24573722e13cSWanpeng Li pgprot_t prot, int node) 24581da177e4SLinus Torvalds { 24591da177e4SLinus Torvalds struct page **pages; 24601da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 2461930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 2462704b862fSLaura Abbott const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; 2463704b862fSLaura Abbott const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ? 2464704b862fSLaura Abbott 0 : 2465704b862fSLaura Abbott __GFP_HIGHMEM; 24661da177e4SLinus Torvalds 2467762216abSWanpeng Li nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; 24681da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 24691da177e4SLinus Torvalds 24701da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
*/ 24718757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 2472704b862fSLaura Abbott pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask, 24733722e13cSWanpeng Li PAGE_KERNEL, node, area->caller); 2474286e1ea3SAndrew Morton } else { 2475976d6dfbSJan Beulich pages = kmalloc_node(array_size, nested_gfp, node); 2476286e1ea3SAndrew Morton } 24777ea36242SAustin Kim 24787ea36242SAustin Kim if (!pages) { 24791da177e4SLinus Torvalds remove_vm_area(area->addr); 24801da177e4SLinus Torvalds kfree(area); 24811da177e4SLinus Torvalds return NULL; 24821da177e4SLinus Torvalds } 24831da177e4SLinus Torvalds 24847ea36242SAustin Kim area->pages = pages; 24857ea36242SAustin Kim area->nr_pages = nr_pages; 24867ea36242SAustin Kim 24871da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 2488bf53d6f8SChristoph Lameter struct page *page; 2489bf53d6f8SChristoph Lameter 24904b90951cSJianguo Wu if (node == NUMA_NO_NODE) 2491704b862fSLaura Abbott page = alloc_page(alloc_mask|highmem_mask); 2492930fc45aSChristoph Lameter else 2493704b862fSLaura Abbott page = alloc_pages_node(node, alloc_mask|highmem_mask, 0); 2494bf53d6f8SChristoph Lameter 2495bf53d6f8SChristoph Lameter if (unlikely(!page)) { 24961da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 24971da177e4SLinus Torvalds area->nr_pages = i; 249897105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 24991da177e4SLinus Torvalds goto fail; 25001da177e4SLinus Torvalds } 2501bf53d6f8SChristoph Lameter area->pages[i] = page; 2502dcf61ff0SLiu Xiang if (gfpflags_allow_blocking(gfp_mask)) 2503660654f9SEric Dumazet cond_resched(); 25041da177e4SLinus Torvalds } 250597105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 25061da177e4SLinus Torvalds 2507f6f8ed47SWANG Chao if (map_vm_area(area, prot, pages)) 25081da177e4SLinus Torvalds goto fail; 25091da177e4SLinus Torvalds return area->addr; 25101da177e4SLinus Torvalds 25111da177e4SLinus Torvalds fail: 2512a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 25137877cdccSMichal Hocko "vmalloc: allocation failure, allocated %ld of %ld bytes", 251422943ab1SDave Hansen (area->nr_pages*PAGE_SIZE), area->size); 2515c67dc624SRoman Penyaev __vfree(area->addr); 25161da177e4SLinus Torvalds return NULL; 25171da177e4SLinus Torvalds } 25181da177e4SLinus Torvalds 2519d0a21265SDavid Rientjes /** 2520d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 2521d0a21265SDavid Rientjes * @size: allocation size 2522d0a21265SDavid Rientjes * @align: desired alignment 2523d0a21265SDavid Rientjes * @start: vm area range start 2524d0a21265SDavid Rientjes * @end: vm area range end 2525d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 2526d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 2527cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 252800ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 2529d0a21265SDavid Rientjes * @caller: caller's return address 2530d0a21265SDavid Rientjes * 2531d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 2532d0a21265SDavid Rientjes * allocator with @gfp_mask flags. Map them into contiguous 2533d0a21265SDavid Rientjes * kernel virtual space, using a pagetable protection of @prot. 
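 *
 * For illustration (editor's note), this mirrors the call pattern used by
 * vmalloc_user() later in this file:
 *
 *	__vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
 *			     GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
 *			     VM_USERMAP, NUMA_NO_NODE,
 *			     __builtin_return_address(0));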
2534a862f68aSMike Rapoport * 2535a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 2536d0a21265SDavid Rientjes */ 2537d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 2538d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 2539cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node, 2540cb9e3c29SAndrey Ryabinin const void *caller) 2541930fc45aSChristoph Lameter { 2542d0a21265SDavid Rientjes struct vm_struct *area; 2543d0a21265SDavid Rientjes void *addr; 2544d0a21265SDavid Rientjes unsigned long real_size = size; 2545d0a21265SDavid Rientjes 2546d0a21265SDavid Rientjes size = PAGE_ALIGN(size); 2547ca79b0c2SArun KS if (!size || (size >> PAGE_SHIFT) > totalram_pages()) 2548de7d2b56SJoe Perches goto fail; 2549d0a21265SDavid Rientjes 2550d98c9e83SAndrey Ryabinin area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED | 2551cb9e3c29SAndrey Ryabinin vm_flags, start, end, node, gfp_mask, caller); 2552d0a21265SDavid Rientjes if (!area) 2553de7d2b56SJoe Perches goto fail; 2554d0a21265SDavid Rientjes 25553722e13cSWanpeng Li addr = __vmalloc_area_node(area, gfp_mask, prot, node); 25561368edf0SMel Gorman if (!addr) 2557b82225f3SWanpeng Li return NULL; 255889219d37SCatalin Marinas 255989219d37SCatalin Marinas /* 256020fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED 256120fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized. 25624341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here. 2563f5252e00SMitsuo Hayasaka */ 256420fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area); 2565f5252e00SMitsuo Hayasaka 256694f4a161SCatalin Marinas kmemleak_vmalloc(area, size, gfp_mask); 256789219d37SCatalin Marinas 256889219d37SCatalin Marinas return addr; 2569de7d2b56SJoe Perches 2570de7d2b56SJoe Perches fail: 2571a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 25727877cdccSMichal Hocko "vmalloc: allocation failure: %lu bytes", real_size); 2573de7d2b56SJoe Perches return NULL; 2574930fc45aSChristoph Lameter } 2575930fc45aSChristoph Lameter 2576153178edSUladzislau Rezki (Sony) /* 2577153178edSUladzislau Rezki (Sony) * This is only for performance analysis of vmalloc and stress purpose. 2578153178edSUladzislau Rezki (Sony) * It is required by vmalloc test module, therefore do not use it other 2579153178edSUladzislau Rezki (Sony) * than that. 2580153178edSUladzislau Rezki (Sony) */ 2581153178edSUladzislau Rezki (Sony) #ifdef CONFIG_TEST_VMALLOC_MODULE 2582153178edSUladzislau Rezki (Sony) EXPORT_SYMBOL_GPL(__vmalloc_node_range); 2583153178edSUladzislau Rezki (Sony) #endif 2584153178edSUladzislau Rezki (Sony) 25851da177e4SLinus Torvalds /** 2586930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 25871da177e4SLinus Torvalds * @size: allocation size 25882dca6999SDavid Miller * @align: desired alignment 25891da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 25901da177e4SLinus Torvalds * @prot: protection mask for the allocated pages 259100ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 2592c85d194bSRandy Dunlap * @caller: caller's return address 25931da177e4SLinus Torvalds * 25941da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 25951da177e4SLinus Torvalds * allocator with @gfp_mask flags. Map them into contiguous 25961da177e4SLinus Torvalds * kernel virtual space, using a pagetable protection of @prot. 
2597a7c3e901SMichal Hocko * 2598dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 2599a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported 2600a7c3e901SMichal Hocko * 2601a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be consulted 2602a7c3e901SMichal Hocko * with mm people. 2603a862f68aSMike Rapoport * 2604a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 26051da177e4SLinus Torvalds */ 26068594a21cSMichal Hocko static void *__vmalloc_node(unsigned long size, unsigned long align, 26072dca6999SDavid Miller gfp_t gfp_mask, pgprot_t prot, 26085e6cafc8SMarek Szyprowski int node, const void *caller) 26091da177e4SLinus Torvalds { 2610d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 2611cb9e3c29SAndrey Ryabinin gfp_mask, prot, 0, node, caller); 26121da177e4SLinus Torvalds } 26131da177e4SLinus Torvalds 2614930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 2615930fc45aSChristoph Lameter { 261600ef2d2fSDavid Rientjes return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, 261723016969SChristoph Lameter __builtin_return_address(0)); 2618930fc45aSChristoph Lameter } 26191da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 26201da177e4SLinus Torvalds 26218594a21cSMichal Hocko static inline void *__vmalloc_node_flags(unsigned long size, 26228594a21cSMichal Hocko int node, gfp_t flags) 26238594a21cSMichal Hocko { 26248594a21cSMichal Hocko return __vmalloc_node(size, 1, flags, PAGE_KERNEL, 26258594a21cSMichal Hocko node, __builtin_return_address(0)); 26268594a21cSMichal Hocko } 26278594a21cSMichal Hocko 26288594a21cSMichal Hocko 26298594a21cSMichal Hocko void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, 26308594a21cSMichal Hocko void *caller) 26318594a21cSMichal Hocko { 26328594a21cSMichal Hocko return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller); 26338594a21cSMichal Hocko } 26348594a21cSMichal Hocko 26351da177e4SLinus Torvalds /** 26361da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 26371da177e4SLinus Torvalds * @size: allocation size 263892eac168SMike Rapoport * 26391da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 26401da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 26411da177e4SLinus Torvalds * 2642c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 26431da177e4SLinus Torvalds * use __vmalloc() instead. 2644a862f68aSMike Rapoport * 2645a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 26461da177e4SLinus Torvalds */ 26471da177e4SLinus Torvalds void *vmalloc(unsigned long size) 26481da177e4SLinus Torvalds { 264900ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 265019809c2dSMichal Hocko GFP_KERNEL); 26511da177e4SLinus Torvalds } 26521da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 26531da177e4SLinus Torvalds 2654930fc45aSChristoph Lameter /** 2655e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 2656e1ca7788SDave Young * @size: allocation size 265792eac168SMike Rapoport * 2658e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 2659e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 2660e1ca7788SDave Young * The memory allocated is set to zero. 
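 *
 * Example (editor's sketch, not from the original source; "struct entry"
 * and "nr" are illustrative names, array_size() is from <linux/overflow.h>):
 *
 *	struct entry *table = vzalloc(array_size(nr, sizeof(*table)));
 *
 *	if (!table)
 *		return -ENOMEM;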
2661e1ca7788SDave Young * 2662e1ca7788SDave Young * For tight control over page level allocator and protection flags 2663e1ca7788SDave Young * use __vmalloc() instead. 2664a862f68aSMike Rapoport * 2665a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2666e1ca7788SDave Young */ 2667e1ca7788SDave Young void *vzalloc(unsigned long size) 2668e1ca7788SDave Young { 266900ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 267019809c2dSMichal Hocko GFP_KERNEL | __GFP_ZERO); 2671e1ca7788SDave Young } 2672e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 2673e1ca7788SDave Young 2674e1ca7788SDave Young /** 2675ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 267683342314SNick Piggin * @size: allocation size 2677ead04089SRolf Eike Beer * 2678ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 2679ead04089SRolf Eike Beer * without leaking data. 2680a862f68aSMike Rapoport * 2681a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 268283342314SNick Piggin */ 268383342314SNick Piggin void *vmalloc_user(unsigned long size) 268483342314SNick Piggin { 2685bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 2686bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 2687bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 268800ef2d2fSDavid Rientjes __builtin_return_address(0)); 268983342314SNick Piggin } 269083342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 269183342314SNick Piggin 269283342314SNick Piggin /** 2693930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 2694930fc45aSChristoph Lameter * @size: allocation size 2695d44e0780SRandy Dunlap * @node: numa node 2696930fc45aSChristoph Lameter * 2697930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 2698930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 2699930fc45aSChristoph Lameter * 2700c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 2701930fc45aSChristoph Lameter * use __vmalloc() instead. 2702a862f68aSMike Rapoport * 2703a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2704930fc45aSChristoph Lameter */ 2705930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 2706930fc45aSChristoph Lameter { 270719809c2dSMichal Hocko return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL, 270823016969SChristoph Lameter node, __builtin_return_address(0)); 2709930fc45aSChristoph Lameter } 2710930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 2711930fc45aSChristoph Lameter 2712e1ca7788SDave Young /** 2713e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 2714e1ca7788SDave Young * @size: allocation size 2715e1ca7788SDave Young * @node: numa node 2716e1ca7788SDave Young * 2717e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 2718e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 2719e1ca7788SDave Young * The memory allocated is set to zero. 2720e1ca7788SDave Young * 2721e1ca7788SDave Young * For tight control over page level allocator and protection flags 2722e1ca7788SDave Young * use __vmalloc_node() instead. 
2723a862f68aSMike Rapoport * 2724a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2725e1ca7788SDave Young */ 2726e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 2727e1ca7788SDave Young { 2728e1ca7788SDave Young return __vmalloc_node_flags(size, node, 272919809c2dSMichal Hocko GFP_KERNEL | __GFP_ZERO); 2730e1ca7788SDave Young } 2731e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 2732e1ca7788SDave Young 27331da177e4SLinus Torvalds /** 2734fc970227SAndrii Nakryiko * vmalloc_user_node_flags - allocate memory for userspace on a specific node 2735fc970227SAndrii Nakryiko * @size: allocation size 2736fc970227SAndrii Nakryiko * @node: numa node 2737fc970227SAndrii Nakryiko * @flags: flags for the page level allocator 2738fc970227SAndrii Nakryiko * 2739fc970227SAndrii Nakryiko * The resulting memory area is zeroed so it can be mapped to userspace 2740fc970227SAndrii Nakryiko * without leaking data. 2741fc970227SAndrii Nakryiko * 2742fc970227SAndrii Nakryiko * Return: pointer to the allocated memory or %NULL on error 2743fc970227SAndrii Nakryiko */ 2744fc970227SAndrii Nakryiko void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags) 2745fc970227SAndrii Nakryiko { 2746fc970227SAndrii Nakryiko return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 2747fc970227SAndrii Nakryiko flags | __GFP_ZERO, PAGE_KERNEL, 2748fc970227SAndrii Nakryiko VM_USERMAP, node, 2749fc970227SAndrii Nakryiko __builtin_return_address(0)); 2750fc970227SAndrii Nakryiko } 2751fc970227SAndrii Nakryiko EXPORT_SYMBOL(vmalloc_user_node_flags); 2752fc970227SAndrii Nakryiko 2753fc970227SAndrii Nakryiko /** 27541da177e4SLinus Torvalds * vmalloc_exec - allocate virtually contiguous, executable memory 27551da177e4SLinus Torvalds * @size: allocation size 27561da177e4SLinus Torvalds * 27571da177e4SLinus Torvalds * Kernel-internal function to allocate enough pages to cover @size 27581da177e4SLinus Torvalds * the page level allocator and map them into contiguous and 27591da177e4SLinus Torvalds * executable kernel virtual space. 27601da177e4SLinus Torvalds * 2761c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 27621da177e4SLinus Torvalds * use __vmalloc() instead. 2763a862f68aSMike Rapoport * 2764a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 27651da177e4SLinus Torvalds */ 27661da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size) 27671da177e4SLinus Torvalds { 2768868b104dSRick Edgecombe return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 2769868b104dSRick Edgecombe GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, 277000ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 27711da177e4SLinus Torvalds } 27721da177e4SLinus Torvalds 27730d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 2774698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 27750d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 2776698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 27770d08e0d3SAndi Kleen #else 2778698d0831SMichal Hocko /* 2779698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others 2780698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone. 
2781698d0831SMichal Hocko */ 2782698d0831SMichal Hocko #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL 27830d08e0d3SAndi Kleen #endif 27840d08e0d3SAndi Kleen 27851da177e4SLinus Torvalds /** 27861da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 27871da177e4SLinus Torvalds * @size: allocation size 27881da177e4SLinus Torvalds * 27891da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 27901da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 2791a862f68aSMike Rapoport * 2792a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 27931da177e4SLinus Torvalds */ 27941da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 27951da177e4SLinus Torvalds { 27962dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, 279700ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 27981da177e4SLinus Torvalds } 27991da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 28001da177e4SLinus Torvalds 280183342314SNick Piggin /** 2802ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 280383342314SNick Piggin * @size: allocation size 2804ead04089SRolf Eike Beer * 2805ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 2806ead04089SRolf Eike Beer * mapped to userspace without leaking data. 2807a862f68aSMike Rapoport * 2808a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 280983342314SNick Piggin */ 281083342314SNick Piggin void *vmalloc_32_user(unsigned long size) 281183342314SNick Piggin { 2812bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 2813bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 2814bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 28155a82ac71SRoman Penyaev __builtin_return_address(0)); 281683342314SNick Piggin } 281783342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 281883342314SNick Piggin 2819d0107eb0SKAMEZAWA Hiroyuki /* 2820d0107eb0SKAMEZAWA Hiroyuki * small helper routine , copy contents to buf from addr. 2821d0107eb0SKAMEZAWA Hiroyuki * If the page is not present, fill zero. 2822d0107eb0SKAMEZAWA Hiroyuki */ 2823d0107eb0SKAMEZAWA Hiroyuki 2824d0107eb0SKAMEZAWA Hiroyuki static int aligned_vread(char *buf, char *addr, unsigned long count) 2825d0107eb0SKAMEZAWA Hiroyuki { 2826d0107eb0SKAMEZAWA Hiroyuki struct page *p; 2827d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 2828d0107eb0SKAMEZAWA Hiroyuki 2829d0107eb0SKAMEZAWA Hiroyuki while (count) { 2830d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 2831d0107eb0SKAMEZAWA Hiroyuki 2832891c49abSAlexander Kuleshov offset = offset_in_page(addr); 2833d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 2834d0107eb0SKAMEZAWA Hiroyuki if (length > count) 2835d0107eb0SKAMEZAWA Hiroyuki length = count; 2836d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 2837d0107eb0SKAMEZAWA Hiroyuki /* 2838d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need 2839d0107eb0SKAMEZAWA Hiroyuki * lock. But adding lock here means that we need to add 2840d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calles for this _debug_ 2841d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 2842d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function. 
2843d0107eb0SKAMEZAWA Hiroyuki */ 2844d0107eb0SKAMEZAWA Hiroyuki if (p) { 2845d0107eb0SKAMEZAWA Hiroyuki /* 2846d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's 2847d0107eb0SKAMEZAWA Hiroyuki * function description) 2848d0107eb0SKAMEZAWA Hiroyuki */ 28499b04c5feSCong Wang void *map = kmap_atomic(p); 2850d0107eb0SKAMEZAWA Hiroyuki memcpy(buf, map + offset, length); 28519b04c5feSCong Wang kunmap_atomic(map); 2852d0107eb0SKAMEZAWA Hiroyuki } else 2853d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, length); 2854d0107eb0SKAMEZAWA Hiroyuki 2855d0107eb0SKAMEZAWA Hiroyuki addr += length; 2856d0107eb0SKAMEZAWA Hiroyuki buf += length; 2857d0107eb0SKAMEZAWA Hiroyuki copied += length; 2858d0107eb0SKAMEZAWA Hiroyuki count -= length; 2859d0107eb0SKAMEZAWA Hiroyuki } 2860d0107eb0SKAMEZAWA Hiroyuki return copied; 2861d0107eb0SKAMEZAWA Hiroyuki } 2862d0107eb0SKAMEZAWA Hiroyuki 2863d0107eb0SKAMEZAWA Hiroyuki static int aligned_vwrite(char *buf, char *addr, unsigned long count) 2864d0107eb0SKAMEZAWA Hiroyuki { 2865d0107eb0SKAMEZAWA Hiroyuki struct page *p; 2866d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 2867d0107eb0SKAMEZAWA Hiroyuki 2868d0107eb0SKAMEZAWA Hiroyuki while (count) { 2869d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 2870d0107eb0SKAMEZAWA Hiroyuki 2871891c49abSAlexander Kuleshov offset = offset_in_page(addr); 2872d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 2873d0107eb0SKAMEZAWA Hiroyuki if (length > count) 2874d0107eb0SKAMEZAWA Hiroyuki length = count; 2875d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 2876d0107eb0SKAMEZAWA Hiroyuki /* 2877d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need 2878d0107eb0SKAMEZAWA Hiroyuki * lock. But adding lock here means that we need to add 2879d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calles for this _debug_ 2880d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 2881d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function. 2882d0107eb0SKAMEZAWA Hiroyuki */ 2883d0107eb0SKAMEZAWA Hiroyuki if (p) { 2884d0107eb0SKAMEZAWA Hiroyuki /* 2885d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's 2886d0107eb0SKAMEZAWA Hiroyuki * function description) 2887d0107eb0SKAMEZAWA Hiroyuki */ 28889b04c5feSCong Wang void *map = kmap_atomic(p); 2889d0107eb0SKAMEZAWA Hiroyuki memcpy(map + offset, buf, length); 28909b04c5feSCong Wang kunmap_atomic(map); 2891d0107eb0SKAMEZAWA Hiroyuki } 2892d0107eb0SKAMEZAWA Hiroyuki addr += length; 2893d0107eb0SKAMEZAWA Hiroyuki buf += length; 2894d0107eb0SKAMEZAWA Hiroyuki copied += length; 2895d0107eb0SKAMEZAWA Hiroyuki count -= length; 2896d0107eb0SKAMEZAWA Hiroyuki } 2897d0107eb0SKAMEZAWA Hiroyuki return copied; 2898d0107eb0SKAMEZAWA Hiroyuki } 2899d0107eb0SKAMEZAWA Hiroyuki 2900d0107eb0SKAMEZAWA Hiroyuki /** 2901d0107eb0SKAMEZAWA Hiroyuki * vread() - read vmalloc area in a safe way. 2902d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for reading data 2903d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 2904d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read. 2905d0107eb0SKAMEZAWA Hiroyuki * 2906d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 2907d0107eb0SKAMEZAWA Hiroyuki * copy data from that area to a given buffer. If the given memory range 2908d0107eb0SKAMEZAWA Hiroyuki * of [addr...addr+count) includes some valid address, data is copied to 2909d0107eb0SKAMEZAWA Hiroyuki * proper area of @buf. 
If there are memory holes, they'll be zero-filled. 2910d0107eb0SKAMEZAWA Hiroyuki * IOREMAP area is treated as memory hole and no copy is done. 2911d0107eb0SKAMEZAWA Hiroyuki * 2912d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't includes any intersects with alive 2913a8e5202dSCong Wang * vm_struct area, returns 0. @buf should be kernel's buffer. 2914d0107eb0SKAMEZAWA Hiroyuki * 2915d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vread() is never necessary because the caller 2916d0107eb0SKAMEZAWA Hiroyuki * should know vmalloc() area is valid and can use memcpy(). 2917d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access vmalloc area without 2918d9009d67SGeert Uytterhoeven * any information, as /dev/kmem. 2919a862f68aSMike Rapoport * 2920a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be increased 2921a862f68aSMike Rapoport * (same number as @count) or %0 if [addr...addr+count) doesn't 2922a862f68aSMike Rapoport * include any intersection with valid vmalloc area 2923d0107eb0SKAMEZAWA Hiroyuki */ 29241da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count) 29251da177e4SLinus Torvalds { 2926e81ce85fSJoonsoo Kim struct vmap_area *va; 2927e81ce85fSJoonsoo Kim struct vm_struct *vm; 29281da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 2929d0107eb0SKAMEZAWA Hiroyuki unsigned long buflen = count; 29301da177e4SLinus Torvalds unsigned long n; 29311da177e4SLinus Torvalds 29321da177e4SLinus Torvalds /* Don't allow overflow */ 29331da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 29341da177e4SLinus Torvalds count = -(unsigned long) addr; 29351da177e4SLinus Torvalds 2936e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock); 2937e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) { 2938e81ce85fSJoonsoo Kim if (!count) 2939e81ce85fSJoonsoo Kim break; 2940e81ce85fSJoonsoo Kim 2941688fcbfcSPengfei Li if (!va->vm) 2942e81ce85fSJoonsoo Kim continue; 2943e81ce85fSJoonsoo Kim 2944e81ce85fSJoonsoo Kim vm = va->vm; 2945e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr; 2946762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm)) 29471da177e4SLinus Torvalds continue; 29481da177e4SLinus Torvalds while (addr < vaddr) { 29491da177e4SLinus Torvalds if (count == 0) 29501da177e4SLinus Torvalds goto finished; 29511da177e4SLinus Torvalds *buf = '\0'; 29521da177e4SLinus Torvalds buf++; 29531da177e4SLinus Torvalds addr++; 29541da177e4SLinus Torvalds count--; 29551da177e4SLinus Torvalds } 2956762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr; 2957d0107eb0SKAMEZAWA Hiroyuki if (n > count) 2958d0107eb0SKAMEZAWA Hiroyuki n = count; 2959e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) 2960d0107eb0SKAMEZAWA Hiroyuki aligned_vread(buf, addr, n); 2961d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */ 2962d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, n); 2963d0107eb0SKAMEZAWA Hiroyuki buf += n; 2964d0107eb0SKAMEZAWA Hiroyuki addr += n; 2965d0107eb0SKAMEZAWA Hiroyuki count -= n; 29661da177e4SLinus Torvalds } 29671da177e4SLinus Torvalds finished: 2968e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 2969d0107eb0SKAMEZAWA Hiroyuki 2970d0107eb0SKAMEZAWA Hiroyuki if (buf == buf_start) 2971d0107eb0SKAMEZAWA Hiroyuki return 0; 2972d0107eb0SKAMEZAWA Hiroyuki /* zero-fill memory holes */ 2973d0107eb0SKAMEZAWA Hiroyuki if (buf != buf_start + buflen) 2974d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, buflen - (buf - buf_start)); 2975d0107eb0SKAMEZAWA Hiroyuki 2976d0107eb0SKAMEZAWA Hiroyuki return 
buflen; 29771da177e4SLinus Torvalds } 29781da177e4SLinus Torvalds 2979d0107eb0SKAMEZAWA Hiroyuki /** 2980d0107eb0SKAMEZAWA Hiroyuki * vwrite() - write vmalloc area in a safe way. 2981d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for source data 2982d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 2983d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read. 2984d0107eb0SKAMEZAWA Hiroyuki * 2985d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 2986d0107eb0SKAMEZAWA Hiroyuki * copy data from a buffer to the given addr. If specified range of 2987d0107eb0SKAMEZAWA Hiroyuki * [addr...addr+count) includes some valid address, data is copied from 2988d0107eb0SKAMEZAWA Hiroyuki * proper area of @buf. If there are memory holes, no copy to hole. 2989d0107eb0SKAMEZAWA Hiroyuki * IOREMAP area is treated as memory hole and no copy is done. 2990d0107eb0SKAMEZAWA Hiroyuki * 2991d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't includes any intersects with alive 2992a8e5202dSCong Wang * vm_struct area, returns 0. @buf should be kernel's buffer. 2993d0107eb0SKAMEZAWA Hiroyuki * 2994d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vwrite() is never necessary because the caller 2995d0107eb0SKAMEZAWA Hiroyuki * should know vmalloc() area is valid and can use memcpy(). 2996d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access vmalloc area without 2997d9009d67SGeert Uytterhoeven * any information, as /dev/kmem. 2998a862f68aSMike Rapoport * 2999a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be 3000a862f68aSMike Rapoport * increased (same number as @count) or %0 if [addr...addr+count) 3001a862f68aSMike Rapoport * doesn't include any intersection with valid vmalloc area 3002d0107eb0SKAMEZAWA Hiroyuki */ 30031da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count) 30041da177e4SLinus Torvalds { 3005e81ce85fSJoonsoo Kim struct vmap_area *va; 3006e81ce85fSJoonsoo Kim struct vm_struct *vm; 3007d0107eb0SKAMEZAWA Hiroyuki char *vaddr; 3008d0107eb0SKAMEZAWA Hiroyuki unsigned long n, buflen; 3009d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 30101da177e4SLinus Torvalds 30111da177e4SLinus Torvalds /* Don't allow overflow */ 30121da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 30131da177e4SLinus Torvalds count = -(unsigned long) addr; 3014d0107eb0SKAMEZAWA Hiroyuki buflen = count; 30151da177e4SLinus Torvalds 3016e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock); 3017e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) { 3018e81ce85fSJoonsoo Kim if (!count) 3019e81ce85fSJoonsoo Kim break; 3020e81ce85fSJoonsoo Kim 3021688fcbfcSPengfei Li if (!va->vm) 3022e81ce85fSJoonsoo Kim continue; 3023e81ce85fSJoonsoo Kim 3024e81ce85fSJoonsoo Kim vm = va->vm; 3025e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr; 3026762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm)) 30271da177e4SLinus Torvalds continue; 30281da177e4SLinus Torvalds while (addr < vaddr) { 30291da177e4SLinus Torvalds if (count == 0) 30301da177e4SLinus Torvalds goto finished; 30311da177e4SLinus Torvalds buf++; 30321da177e4SLinus Torvalds addr++; 30331da177e4SLinus Torvalds count--; 30341da177e4SLinus Torvalds } 3035762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr; 3036d0107eb0SKAMEZAWA Hiroyuki if (n > count) 3037d0107eb0SKAMEZAWA Hiroyuki n = count; 3038e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) { 3039d0107eb0SKAMEZAWA Hiroyuki aligned_vwrite(buf, addr, n); 3040d0107eb0SKAMEZAWA Hiroyuki copied++; 
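			/*
			 * copied only records that at least one byte landed
			 * in a real (non-VM_IOREMAP) mapping; if the whole
			 * range fell into holes, vwrite() returns 0 below
			 * instead of buflen.
			 */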
3041d0107eb0SKAMEZAWA Hiroyuki } 3042d0107eb0SKAMEZAWA Hiroyuki buf += n; 3043d0107eb0SKAMEZAWA Hiroyuki addr += n; 3044d0107eb0SKAMEZAWA Hiroyuki count -= n; 30451da177e4SLinus Torvalds } 30461da177e4SLinus Torvalds finished: 3047e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 3048d0107eb0SKAMEZAWA Hiroyuki if (!copied) 3049d0107eb0SKAMEZAWA Hiroyuki return 0; 3050d0107eb0SKAMEZAWA Hiroyuki return buflen; 30511da177e4SLinus Torvalds } 305283342314SNick Piggin 305383342314SNick Piggin /** 3054e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace 3055e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover 3056e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at 3057e69e9d4aSHATAYAMA Daisuke * @kaddr: virtual address of vmalloc kernel memory 3058*bdebd6a2SJann Horn * @pgoff: offset from @kaddr to start at 3059e69e9d4aSHATAYAMA Daisuke * @size: size of map area 3060e69e9d4aSHATAYAMA Daisuke * 3061e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure 3062e69e9d4aSHATAYAMA Daisuke * 3063e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area, 3064e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at 3065e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if that criteria isn't 3066e69e9d4aSHATAYAMA Daisuke * met. 3067e69e9d4aSHATAYAMA Daisuke * 3068e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c) 3069e69e9d4aSHATAYAMA Daisuke */ 3070e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 3071*bdebd6a2SJann Horn void *kaddr, unsigned long pgoff, 3072*bdebd6a2SJann Horn unsigned long size) 3073e69e9d4aSHATAYAMA Daisuke { 3074e69e9d4aSHATAYAMA Daisuke struct vm_struct *area; 3075*bdebd6a2SJann Horn unsigned long off; 3076*bdebd6a2SJann Horn unsigned long end_index; 3077*bdebd6a2SJann Horn 3078*bdebd6a2SJann Horn if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) 3079*bdebd6a2SJann Horn return -EINVAL; 3080e69e9d4aSHATAYAMA Daisuke 3081e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size); 3082e69e9d4aSHATAYAMA Daisuke 3083e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 3084e69e9d4aSHATAYAMA Daisuke return -EINVAL; 3085e69e9d4aSHATAYAMA Daisuke 3086e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr); 3087e69e9d4aSHATAYAMA Daisuke if (!area) 3088e69e9d4aSHATAYAMA Daisuke return -EINVAL; 3089e69e9d4aSHATAYAMA Daisuke 3090fe9041c2SChristoph Hellwig if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) 3091e69e9d4aSHATAYAMA Daisuke return -EINVAL; 3092e69e9d4aSHATAYAMA Daisuke 3093*bdebd6a2SJann Horn if (check_add_overflow(size, off, &end_index) || 3094*bdebd6a2SJann Horn end_index > get_vm_area_size(area)) 3095e69e9d4aSHATAYAMA Daisuke return -EINVAL; 3096*bdebd6a2SJann Horn kaddr += off; 3097e69e9d4aSHATAYAMA Daisuke 3098e69e9d4aSHATAYAMA Daisuke do { 3099e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr); 3100e69e9d4aSHATAYAMA Daisuke int ret; 3101e69e9d4aSHATAYAMA Daisuke 3102e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page); 3103e69e9d4aSHATAYAMA Daisuke if (ret) 3104e69e9d4aSHATAYAMA Daisuke return ret; 3105e69e9d4aSHATAYAMA Daisuke 3106e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE; 3107e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE; 3108e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE; 3109e69e9d4aSHATAYAMA Daisuke } while (size > 0); 3110e69e9d4aSHATAYAMA Daisuke 3111e69e9d4aSHATAYAMA Daisuke vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 
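	/*
	 * The flags set above pin this mapping: VM_DONTEXPAND keeps mremap()
	 * from growing the user mapping beyond the vmalloc buffer that backs
	 * it, and VM_DONTDUMP leaves these pages out of core dumps.
	 */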
3112e69e9d4aSHATAYAMA Daisuke 3113e69e9d4aSHATAYAMA Daisuke return 0; 3114e69e9d4aSHATAYAMA Daisuke } 3115e69e9d4aSHATAYAMA Daisuke EXPORT_SYMBOL(remap_vmalloc_range_partial); 3116e69e9d4aSHATAYAMA Daisuke 3117e69e9d4aSHATAYAMA Daisuke /** 311883342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 311983342314SNick Piggin * @vma: vma to cover (map full range of vma) 312083342314SNick Piggin * @addr: vmalloc memory 312183342314SNick Piggin * @pgoff: number of pages into addr before first page to map 31227682486bSRandy Dunlap * 31237682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 312483342314SNick Piggin * 312583342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 312683342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 312783342314SNick Piggin * that criteria isn't met. 312883342314SNick Piggin * 312972fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 313083342314SNick Piggin */ 313183342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 313283342314SNick Piggin unsigned long pgoff) 313383342314SNick Piggin { 3134e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start, 3135*bdebd6a2SJann Horn addr, pgoff, 3136e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start); 313783342314SNick Piggin } 313883342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 313983342314SNick Piggin 31401eeb66a1SChristoph Hellwig /* 3141763802b5SJoerg Roedel * Implement stubs for vmalloc_sync_[un]mappings () if the architecture chose 3142763802b5SJoerg Roedel * not to have one. 31433f8fd02bSJoerg Roedel * 31443f8fd02bSJoerg Roedel * The purpose of this function is to make sure the vmalloc area 31453f8fd02bSJoerg Roedel * mappings are identical in all page-tables in the system. 31461eeb66a1SChristoph Hellwig */ 3147763802b5SJoerg Roedel void __weak vmalloc_sync_mappings(void) 31481eeb66a1SChristoph Hellwig { 31491eeb66a1SChristoph Hellwig } 31505f4352fbSJeremy Fitzhardinge 3151763802b5SJoerg Roedel void __weak vmalloc_sync_unmappings(void) 3152763802b5SJoerg Roedel { 3153763802b5SJoerg Roedel } 31545f4352fbSJeremy Fitzhardinge 31558b1e0f81SAnshuman Khandual static int f(pte_t *pte, unsigned long addr, void *data) 31565f4352fbSJeremy Fitzhardinge { 3157cd12909cSDavid Vrabel pte_t ***p = data; 3158cd12909cSDavid Vrabel 3159cd12909cSDavid Vrabel if (p) { 3160cd12909cSDavid Vrabel *(*p) = pte; 3161cd12909cSDavid Vrabel (*p)++; 3162cd12909cSDavid Vrabel } 31635f4352fbSJeremy Fitzhardinge return 0; 31645f4352fbSJeremy Fitzhardinge } 31655f4352fbSJeremy Fitzhardinge 31665f4352fbSJeremy Fitzhardinge /** 31675f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 31685f4352fbSJeremy Fitzhardinge * @size: size of the area 3169cd12909cSDavid Vrabel * @ptes: returns the PTEs for the address space 31707682486bSRandy Dunlap * 31717682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 31725f4352fbSJeremy Fitzhardinge * 31735f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 31745f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 3175cd12909cSDavid Vrabel * are created. 3176cd12909cSDavid Vrabel * 3177cd12909cSDavid Vrabel * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 3178cd12909cSDavid Vrabel * allocated for the VM area are returned. 
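 *
 * Example (editor's sketch, not from the original source; the caller
 * supplies one pte_t * slot per page of the requested size):
 *
 *	pte_t *ptes[2];
 *	struct vm_struct *vm = alloc_vm_area(2 * PAGE_SIZE, ptes);
 *
 *	if (!vm)
 *		return -ENOMEM;
 *	... install mappings through ptes[0] and ptes[1] ...
 *	free_vm_area(vm);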
31795f4352fbSJeremy Fitzhardinge */ 3180cd12909cSDavid Vrabel struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) 31815f4352fbSJeremy Fitzhardinge { 31825f4352fbSJeremy Fitzhardinge struct vm_struct *area; 31835f4352fbSJeremy Fitzhardinge 318423016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 318523016969SChristoph Lameter __builtin_return_address(0)); 31865f4352fbSJeremy Fitzhardinge if (area == NULL) 31875f4352fbSJeremy Fitzhardinge return NULL; 31885f4352fbSJeremy Fitzhardinge 31895f4352fbSJeremy Fitzhardinge /* 31905f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 31915f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm. 31925f4352fbSJeremy Fitzhardinge */ 31935f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 3194cd12909cSDavid Vrabel size, f, ptes ? &ptes : NULL)) { 31955f4352fbSJeremy Fitzhardinge free_vm_area(area); 31965f4352fbSJeremy Fitzhardinge return NULL; 31975f4352fbSJeremy Fitzhardinge } 31985f4352fbSJeremy Fitzhardinge 31995f4352fbSJeremy Fitzhardinge return area; 32005f4352fbSJeremy Fitzhardinge } 32015f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 32025f4352fbSJeremy Fitzhardinge 32035f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 32045f4352fbSJeremy Fitzhardinge { 32055f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 32065f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 32075f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 32085f4352fbSJeremy Fitzhardinge kfree(area); 32095f4352fbSJeremy Fitzhardinge } 32105f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 3211a10aa579SChristoph Lameter 32124f8b02b4STejun Heo #ifdef CONFIG_SMP 3213ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 3214ca23e405STejun Heo { 32154583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node); 3216ca23e405STejun Heo } 3217ca23e405STejun Heo 3218ca23e405STejun Heo /** 321968ad4a33SUladzislau Rezki (Sony) * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 322068ad4a33SUladzislau Rezki (Sony) * @addr: target address 3221ca23e405STejun Heo * 322268ad4a33SUladzislau Rezki (Sony) * Returns: vmap_area if it is found. If there is no such area, 322368ad4a33SUladzislau Rezki (Sony) * the first highest (reverse order) vmap_area is returned, 322468ad4a33SUladzislau Rezki (Sony) * i.e. va->va_start < addr && va->va_end < addr, or NULL 322568ad4a33SUladzislau Rezki (Sony) * if there are no areas before @addr.
3226ca23e405STejun Heo */ 322768ad4a33SUladzislau Rezki (Sony) static struct vmap_area * 322868ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr) 3229ca23e405STejun Heo { 323068ad4a33SUladzislau Rezki (Sony) struct vmap_area *va, *tmp; 323168ad4a33SUladzislau Rezki (Sony) struct rb_node *n; 323268ad4a33SUladzislau Rezki (Sony) 323368ad4a33SUladzislau Rezki (Sony) n = free_vmap_area_root.rb_node; 323468ad4a33SUladzislau Rezki (Sony) va = NULL; 3235ca23e405STejun Heo 3236ca23e405STejun Heo while (n) { 323768ad4a33SUladzislau Rezki (Sony) tmp = rb_entry(n, struct vmap_area, rb_node); 323868ad4a33SUladzislau Rezki (Sony) if (tmp->va_start <= addr) { 323968ad4a33SUladzislau Rezki (Sony) va = tmp; 324068ad4a33SUladzislau Rezki (Sony) if (tmp->va_end >= addr) 3241ca23e405STejun Heo break; 3242ca23e405STejun Heo 324368ad4a33SUladzislau Rezki (Sony) n = n->rb_right; 3244ca23e405STejun Heo } else { 324568ad4a33SUladzislau Rezki (Sony) n = n->rb_left; 3246ca23e405STejun Heo } 324768ad4a33SUladzislau Rezki (Sony) } 324868ad4a33SUladzislau Rezki (Sony) 324968ad4a33SUladzislau Rezki (Sony) return va; 3250ca23e405STejun Heo } 3251ca23e405STejun Heo 3252ca23e405STejun Heo /** 325368ad4a33SUladzislau Rezki (Sony) * pvm_determine_end_from_reverse - find the highest aligned address 325468ad4a33SUladzislau Rezki (Sony) * of free block below VMALLOC_END 325568ad4a33SUladzislau Rezki (Sony) * @va: 325668ad4a33SUladzislau Rezki (Sony) * in - the VA we start the search(reverse order); 325768ad4a33SUladzislau Rezki (Sony) * out - the VA with the highest aligned end address. 3258ca23e405STejun Heo * 325968ad4a33SUladzislau Rezki (Sony) * Returns: determined end address within vmap_area 3260ca23e405STejun Heo */ 326168ad4a33SUladzislau Rezki (Sony) static unsigned long 326268ad4a33SUladzislau Rezki (Sony) pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 3263ca23e405STejun Heo { 326468ad4a33SUladzislau Rezki (Sony) unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3265ca23e405STejun Heo unsigned long addr; 3266ca23e405STejun Heo 326768ad4a33SUladzislau Rezki (Sony) if (likely(*va)) { 326868ad4a33SUladzislau Rezki (Sony) list_for_each_entry_from_reverse((*va), 326968ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list, list) { 327068ad4a33SUladzislau Rezki (Sony) addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 327168ad4a33SUladzislau Rezki (Sony) if ((*va)->va_start < addr) 327268ad4a33SUladzislau Rezki (Sony) return addr; 327368ad4a33SUladzislau Rezki (Sony) } 3274ca23e405STejun Heo } 3275ca23e405STejun Heo 327668ad4a33SUladzislau Rezki (Sony) return 0; 3277ca23e405STejun Heo } 3278ca23e405STejun Heo 3279ca23e405STejun Heo /** 3280ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 3281ca23e405STejun Heo * @offsets: array containing offset of each area 3282ca23e405STejun Heo * @sizes: array containing size of each area 3283ca23e405STejun Heo * @nr_vms: the number of areas to allocate 3284ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this 3285ca23e405STejun Heo * 3286ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated 3287ca23e405STejun Heo * vm_structs on success, %NULL on failure 3288ca23e405STejun Heo * 3289ca23e405STejun Heo * Percpu allocator wants to use congruent vm areas so that it can 3290ca23e405STejun Heo * maintain the offsets among percpu areas. 
This function allocates 3291ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 3292ec3f64fcSDavid Rientjes * be scattered pretty far, with the distance between two areas easily 3293ec3f64fcSDavid Rientjes * reaching gigabytes. To avoid interacting with regular vmallocs, these 3294ec3f64fcSDavid Rientjes * areas are allocated from the top. 3295ca23e405STejun Heo * 3296ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It 329768ad4a33SUladzislau Rezki (Sony) * does everything top-down and scans free blocks from the end looking 329868ad4a33SUladzislau Rezki (Sony) * for a matching base. While scanning, if any of the areas does not fit, 329968ad4a33SUladzislau Rezki (Sony) * the base address is pulled down to make it fit. Scanning is repeated until 330068ad4a33SUladzislau Rezki (Sony) * all the areas fit, and then all necessary data structures are inserted 330168ad4a33SUladzislau Rezki (Sony) * and the result is returned. 3302ca23e405STejun Heo */ 3303ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 3304ca23e405STejun Heo const size_t *sizes, int nr_vms, 3305ec3f64fcSDavid Rientjes size_t align) 3306ca23e405STejun Heo { 3307ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 3308ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 330968ad4a33SUladzislau Rezki (Sony) struct vmap_area **vas, *va; 3310ca23e405STejun Heo struct vm_struct **vms; 3311ca23e405STejun Heo int area, area2, last_area, term_area; 3312253a496dSDaniel Axtens unsigned long base, start, size, end, last_end, orig_start, orig_end; 3313ca23e405STejun Heo bool purged = false; 331468ad4a33SUladzislau Rezki (Sony) enum fit_type type; 3315ca23e405STejun Heo 3316ca23e405STejun Heo /* verify parameters and allocate data structures */ 3317891c49abSAlexander Kuleshov BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 3318ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 3319ca23e405STejun Heo start = offsets[area]; 3320ca23e405STejun Heo end = start + sizes[area]; 3321ca23e405STejun Heo 3322ca23e405STejun Heo /* is everything aligned properly?
*/ 3323ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 3324ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 3325ca23e405STejun Heo 3326ca23e405STejun Heo /* detect the area with the highest address */ 3327ca23e405STejun Heo if (start > offsets[last_area]) 3328ca23e405STejun Heo last_area = area; 3329ca23e405STejun Heo 3330c568da28SWei Yang for (area2 = area + 1; area2 < nr_vms; area2++) { 3331ca23e405STejun Heo unsigned long start2 = offsets[area2]; 3332ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 3333ca23e405STejun Heo 3334c568da28SWei Yang BUG_ON(start2 < end && start < end2); 3335ca23e405STejun Heo } 3336ca23e405STejun Heo } 3337ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 3338ca23e405STejun Heo 3339ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 3340ca23e405STejun Heo WARN_ON(true); 3341ca23e405STejun Heo return NULL; 3342ca23e405STejun Heo } 3343ca23e405STejun Heo 33444d67d860SThomas Meyer vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 33454d67d860SThomas Meyer vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 3346ca23e405STejun Heo if (!vas || !vms) 3347f1db7afdSKautuk Consul goto err_free2; 3348ca23e405STejun Heo 3349ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 335068ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 3351ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 3352ca23e405STejun Heo if (!vas[area] || !vms[area]) 3353ca23e405STejun Heo goto err_free; 3354ca23e405STejun Heo } 3355ca23e405STejun Heo retry: 3356e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 3357ca23e405STejun Heo 3358ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 3359ca23e405STejun Heo area = term_area = last_area; 3360ca23e405STejun Heo start = offsets[area]; 3361ca23e405STejun Heo end = start + sizes[area]; 3362ca23e405STejun Heo 336368ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(vmalloc_end); 336468ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 3365ca23e405STejun Heo 3366ca23e405STejun Heo while (true) { 3367ca23e405STejun Heo /* 3368ca23e405STejun Heo * base might have underflowed, add last_end before 3369ca23e405STejun Heo * comparing. 3370ca23e405STejun Heo */ 337168ad4a33SUladzislau Rezki (Sony) if (base + last_end < vmalloc_start + last_end) 337268ad4a33SUladzislau Rezki (Sony) goto overflow; 3373ca23e405STejun Heo 3374ca23e405STejun Heo /* 337568ad4a33SUladzislau Rezki (Sony) * Fitting base has not been found. 3376ca23e405STejun Heo */ 337768ad4a33SUladzislau Rezki (Sony) if (va == NULL) 337868ad4a33SUladzislau Rezki (Sony) goto overflow; 3379ca23e405STejun Heo 3380ca23e405STejun Heo /* 3381d8cc323dSQiujun Huang * If required width exceeds current VA block, move 33825336e52cSKuppuswamy Sathyanarayanan * base downwards and then recheck. 33835336e52cSKuppuswamy Sathyanarayanan */ 33845336e52cSKuppuswamy Sathyanarayanan if (base + end > va->va_end) { 33855336e52cSKuppuswamy Sathyanarayanan base = pvm_determine_end_from_reverse(&va, align) - end; 33865336e52cSKuppuswamy Sathyanarayanan term_area = area; 33875336e52cSKuppuswamy Sathyanarayanan continue; 33885336e52cSKuppuswamy Sathyanarayanan } 33895336e52cSKuppuswamy Sathyanarayanan 33905336e52cSKuppuswamy Sathyanarayanan /* 339168ad4a33SUladzislau Rezki (Sony) * If this VA does not fit, move base downwards and recheck. 
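 * That is, base + start fell below va->va_start, so step to the
 * previous free block and recompute base from its end.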
3392ca23e405STejun Heo */ 33935336e52cSKuppuswamy Sathyanarayanan if (base + start < va->va_start) { 339468ad4a33SUladzislau Rezki (Sony) va = node_to_va(rb_prev(&va->rb_node)); 339568ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 3396ca23e405STejun Heo term_area = area; 3397ca23e405STejun Heo continue; 3398ca23e405STejun Heo } 3399ca23e405STejun Heo 3400ca23e405STejun Heo /* 3401ca23e405STejun Heo * This area fits, move on to the previous one. If 3402ca23e405STejun Heo * the previous one is the terminal one, we're done. 3403ca23e405STejun Heo */ 3404ca23e405STejun Heo area = (area + nr_vms - 1) % nr_vms; 3405ca23e405STejun Heo if (area == term_area) 3406ca23e405STejun Heo break; 340768ad4a33SUladzislau Rezki (Sony) 3408ca23e405STejun Heo start = offsets[area]; 3409ca23e405STejun Heo end = start + sizes[area]; 341068ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(base + end); 3411ca23e405STejun Heo } 341268ad4a33SUladzislau Rezki (Sony) 3413ca23e405STejun Heo /* we've found a fitting base, insert all va's */ 3414ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 341568ad4a33SUladzislau Rezki (Sony) int ret; 3416ca23e405STejun Heo 341768ad4a33SUladzislau Rezki (Sony) start = base + offsets[area]; 341868ad4a33SUladzislau Rezki (Sony) size = sizes[area]; 341968ad4a33SUladzislau Rezki (Sony) 342068ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(start); 342168ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(va == NULL)) 342268ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 342368ad4a33SUladzislau Rezki (Sony) goto recovery; 342468ad4a33SUladzislau Rezki (Sony) 342568ad4a33SUladzislau Rezki (Sony) type = classify_va_fit_type(va, start, size); 342668ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(type == NOTHING_FIT)) 342768ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 342868ad4a33SUladzislau Rezki (Sony) goto recovery; 342968ad4a33SUladzislau Rezki (Sony) 343068ad4a33SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(va, start, size, type); 343168ad4a33SUladzislau Rezki (Sony) if (unlikely(ret)) 343268ad4a33SUladzislau Rezki (Sony) goto recovery; 343368ad4a33SUladzislau Rezki (Sony) 343468ad4a33SUladzislau Rezki (Sony) /* Allocated area. 
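 * Reuse the preallocated vmap_area to describe the range that was
 * just carved out of the free tree; it is inserted into the busy
 * tree once every area has been set up.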
*/ 343568ad4a33SUladzislau Rezki (Sony) va = vas[area]; 343668ad4a33SUladzislau Rezki (Sony) va->va_start = start; 343768ad4a33SUladzislau Rezki (Sony) va->va_end = start + size; 3438ca23e405STejun Heo } 3439ca23e405STejun Heo 3440e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 3441ca23e405STejun Heo 3442253a496dSDaniel Axtens /* populate the kasan shadow space */ 3443253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) { 3444253a496dSDaniel Axtens if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) 3445253a496dSDaniel Axtens goto err_free_shadow; 3446253a496dSDaniel Axtens 3447253a496dSDaniel Axtens kasan_unpoison_vmalloc((void *)vas[area]->va_start, 3448253a496dSDaniel Axtens sizes[area]); 3449253a496dSDaniel Axtens } 3450253a496dSDaniel Axtens 3451ca23e405STejun Heo /* insert all vm's */ 3452e36176beSUladzislau Rezki (Sony) spin_lock(&vmap_area_lock); 3453e36176beSUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 3454e36176beSUladzislau Rezki (Sony) insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); 3455e36176beSUladzislau Rezki (Sony) 3456e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, 3457ca23e405STejun Heo pcpu_get_vm_areas); 3458e36176beSUladzislau Rezki (Sony) } 3459e36176beSUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 3460ca23e405STejun Heo 3461ca23e405STejun Heo kfree(vas); 3462ca23e405STejun Heo return vms; 3463ca23e405STejun Heo 346468ad4a33SUladzislau Rezki (Sony) recovery: 3465e36176beSUladzislau Rezki (Sony) /* 3466e36176beSUladzislau Rezki (Sony) * Remove previously allocated areas. There is no 3467e36176beSUladzislau Rezki (Sony) * need to remove these areas from the busy tree, 3468e36176beSUladzislau Rezki (Sony) * because they are inserted there only in the final step, 3469e36176beSUladzislau Rezki (Sony) * and only when pcpu_get_vm_areas() succeeds. 3470e36176beSUladzislau Rezki (Sony) */ 347168ad4a33SUladzislau Rezki (Sony) while (area--) { 3472253a496dSDaniel Axtens orig_start = vas[area]->va_start; 3473253a496dSDaniel Axtens orig_end = vas[area]->va_end; 3474253a496dSDaniel Axtens va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, 34753c5c3cfbSDaniel Axtens &free_vmap_area_list); 3476253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 3477253a496dSDaniel Axtens va->va_start, va->va_end); 347868ad4a33SUladzislau Rezki (Sony) vas[area] = NULL; 347968ad4a33SUladzislau Rezki (Sony) } 348068ad4a33SUladzislau Rezki (Sony) 348168ad4a33SUladzislau Rezki (Sony) overflow: 3482e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 348368ad4a33SUladzislau Rezki (Sony) if (!purged) { 348468ad4a33SUladzislau Rezki (Sony) purge_vmap_area_lazy(); 348568ad4a33SUladzislau Rezki (Sony) purged = true; 348668ad4a33SUladzislau Rezki (Sony) 348768ad4a33SUladzislau Rezki (Sony) /* Before "retry", check if we recover.
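 * If the recovery path above ran, it handed the preallocated
 * vmap_area objects back to the free tree (or freed them on merge)
 * and cleared the slots; replenish any NULL entries before retrying.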
*/ 348868ad4a33SUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 348968ad4a33SUladzislau Rezki (Sony) if (vas[area]) 349068ad4a33SUladzislau Rezki (Sony) continue; 349168ad4a33SUladzislau Rezki (Sony) 349268ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc( 349368ad4a33SUladzislau Rezki (Sony) vmap_area_cachep, GFP_KERNEL); 349468ad4a33SUladzislau Rezki (Sony) if (!vas[area]) 349568ad4a33SUladzislau Rezki (Sony) goto err_free; 349668ad4a33SUladzislau Rezki (Sony) } 349768ad4a33SUladzislau Rezki (Sony) 349868ad4a33SUladzislau Rezki (Sony) goto retry; 349968ad4a33SUladzislau Rezki (Sony) } 350068ad4a33SUladzislau Rezki (Sony) 3501ca23e405STejun Heo err_free: 3502ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 350368ad4a33SUladzislau Rezki (Sony) if (vas[area]) 350468ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, vas[area]); 350568ad4a33SUladzislau Rezki (Sony) 3506ca23e405STejun Heo kfree(vms[area]); 3507ca23e405STejun Heo } 3508f1db7afdSKautuk Consul err_free2: 3509ca23e405STejun Heo kfree(vas); 3510ca23e405STejun Heo kfree(vms); 3511ca23e405STejun Heo return NULL; 3512253a496dSDaniel Axtens 3513253a496dSDaniel Axtens err_free_shadow: 3514253a496dSDaniel Axtens spin_lock(&free_vmap_area_lock); 3515253a496dSDaniel Axtens /* 3516253a496dSDaniel Axtens * We release all the vmalloc shadows, even the ones for regions that 3517253a496dSDaniel Axtens * hadn't been successfully added. This relies on kasan_release_vmalloc 3518253a496dSDaniel Axtens * being able to tolerate this case. 3519253a496dSDaniel Axtens */ 3520253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) { 3521253a496dSDaniel Axtens orig_start = vas[area]->va_start; 3522253a496dSDaniel Axtens orig_end = vas[area]->va_end; 3523253a496dSDaniel Axtens va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, 3524253a496dSDaniel Axtens &free_vmap_area_list); 3525253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 3526253a496dSDaniel Axtens va->va_start, va->va_end); 3527253a496dSDaniel Axtens vas[area] = NULL; 3528253a496dSDaniel Axtens kfree(vms[area]); 3529253a496dSDaniel Axtens } 3530253a496dSDaniel Axtens spin_unlock(&free_vmap_area_lock); 3531253a496dSDaniel Axtens kfree(vas); 3532253a496dSDaniel Axtens kfree(vms); 3533253a496dSDaniel Axtens return NULL; 3534ca23e405STejun Heo } 3535ca23e405STejun Heo 3536ca23e405STejun Heo /** 3537ca23e405STejun Heo * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 3538ca23e405STejun Heo * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 3539ca23e405STejun Heo * @nr_vms: the number of allocated areas 3540ca23e405STejun Heo * 3541ca23e405STejun Heo * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
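 *
 * A minimal usage sketch (hypothetical caller, illustrative only):
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, nr_vms, align);
 *	if (!vms)
 *		return -ENOMEM;
 *	...map pages at vms[i]->addr + offsets[i]...
 *	pcpu_free_vm_areas(vms, nr_vms);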
3542ca23e405STejun Heo */ 3543ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 3544ca23e405STejun Heo { 3545ca23e405STejun Heo int i; 3546ca23e405STejun Heo 3547ca23e405STejun Heo for (i = 0; i < nr_vms; i++) 3548ca23e405STejun Heo free_vm_area(vms[i]); 3549ca23e405STejun Heo kfree(vms); 3550ca23e405STejun Heo } 35514f8b02b4STejun Heo #endif /* CONFIG_SMP */ 3552a10aa579SChristoph Lameter 3553a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS 3554a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos) 3555e36176beSUladzislau Rezki (Sony) __acquires(&vmap_purge_lock) 3556d4033afdSJoonsoo Kim __acquires(&vmap_area_lock) 3557a10aa579SChristoph Lameter { 3558e36176beSUladzislau Rezki (Sony) mutex_lock(&vmap_purge_lock); 3559d4033afdSJoonsoo Kim spin_lock(&vmap_area_lock); 3560e36176beSUladzislau Rezki (Sony) 35613f500069Szijun_hu return seq_list_start(&vmap_area_list, *pos); 3562a10aa579SChristoph Lameter } 3563a10aa579SChristoph Lameter 3564a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos) 3565a10aa579SChristoph Lameter { 35663f500069Szijun_hu return seq_list_next(p, &vmap_area_list, pos); 3567a10aa579SChristoph Lameter } 3568a10aa579SChristoph Lameter 3569a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p) 3570e36176beSUladzislau Rezki (Sony) __releases(&vmap_purge_lock) 3571d4033afdSJoonsoo Kim __releases(&vmap_area_lock) 3572a10aa579SChristoph Lameter { 3573e36176beSUladzislau Rezki (Sony) mutex_unlock(&vmap_purge_lock); 3574d4033afdSJoonsoo Kim spin_unlock(&vmap_area_lock); 3575a10aa579SChristoph Lameter } 3576a10aa579SChristoph Lameter 3577a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v) 3578a47a126aSEric Dumazet { 3579e5adfffcSKirill A. 
Shutemov if (IS_ENABLED(CONFIG_NUMA)) { 3580a47a126aSEric Dumazet unsigned int nr, *counters = m->private; 3581a47a126aSEric Dumazet 3582a47a126aSEric Dumazet if (!counters) 3583a47a126aSEric Dumazet return; 3584a47a126aSEric Dumazet 3585af12346cSWanpeng Li if (v->flags & VM_UNINITIALIZED) 3586af12346cSWanpeng Li return; 35877e5b528bSDmitry Vyukov /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 35887e5b528bSDmitry Vyukov smp_rmb(); 3589af12346cSWanpeng Li 3590a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 3591a47a126aSEric Dumazet 3592a47a126aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr++) 3593a47a126aSEric Dumazet counters[page_to_nid(v->pages[nr])]++; 3594a47a126aSEric Dumazet 3595a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY) 3596a47a126aSEric Dumazet if (counters[nr]) 3597a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]); 3598a47a126aSEric Dumazet } 3599a47a126aSEric Dumazet } 3600a47a126aSEric Dumazet 3601dd3b8353SUladzislau Rezki (Sony) static void show_purge_info(struct seq_file *m) 3602dd3b8353SUladzislau Rezki (Sony) { 3603dd3b8353SUladzislau Rezki (Sony) struct llist_node *head; 3604dd3b8353SUladzislau Rezki (Sony) struct vmap_area *va; 3605dd3b8353SUladzislau Rezki (Sony) 3606dd3b8353SUladzislau Rezki (Sony) head = READ_ONCE(vmap_purge_list.first); 3607dd3b8353SUladzislau Rezki (Sony) if (head == NULL) 3608dd3b8353SUladzislau Rezki (Sony) return; 3609dd3b8353SUladzislau Rezki (Sony) 3610dd3b8353SUladzislau Rezki (Sony) llist_for_each_entry(va, head, purge_list) { 3611dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", 3612dd3b8353SUladzislau Rezki (Sony) (void *)va->va_start, (void *)va->va_end, 3613dd3b8353SUladzislau Rezki (Sony) va->va_end - va->va_start); 3614dd3b8353SUladzislau Rezki (Sony) } 3615dd3b8353SUladzislau Rezki (Sony) } 3616dd3b8353SUladzislau Rezki (Sony) 3617a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p) 3618a10aa579SChristoph Lameter { 36193f500069Szijun_hu struct vmap_area *va; 3620d4033afdSJoonsoo Kim struct vm_struct *v; 3621d4033afdSJoonsoo Kim 36223f500069Szijun_hu va = list_entry(p, struct vmap_area, list); 36233f500069Szijun_hu 3624c2ce8c14SWanpeng Li /* 3625688fcbfcSPengfei Li * s_show can race with remove_vm_area(): !vm means the vmap area 3626688fcbfcSPengfei Li * is being torn down or was created by a vm_map_ram allocation.
3627c2ce8c14SWanpeng Li */ 3628688fcbfcSPengfei Li if (!va->vm) { 3629dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 363078c72746SYisheng Xie (void *)va->va_start, (void *)va->va_end, 3631dd3b8353SUladzislau Rezki (Sony) va->va_end - va->va_start); 363278c72746SYisheng Xie 3633d4033afdSJoonsoo Kim return 0; 363478c72746SYisheng Xie } 3635d4033afdSJoonsoo Kim 3636d4033afdSJoonsoo Kim v = va->vm; 3637a10aa579SChristoph Lameter 363845ec1690SKees Cook seq_printf(m, "0x%pK-0x%pK %7ld", 3639a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 3640a10aa579SChristoph Lameter 364162c70bceSJoe Perches if (v->caller) 364262c70bceSJoe Perches seq_printf(m, " %pS", v->caller); 364323016969SChristoph Lameter 3644a10aa579SChristoph Lameter if (v->nr_pages) 3645a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 3646a10aa579SChristoph Lameter 3647a10aa579SChristoph Lameter if (v->phys_addr) 3648199eaa05SMiles Chen seq_printf(m, " phys=%pa", &v->phys_addr); 3649a10aa579SChristoph Lameter 3650a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 3651f4527c90SFabian Frederick seq_puts(m, " ioremap"); 3652a10aa579SChristoph Lameter 3653a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 3654f4527c90SFabian Frederick seq_puts(m, " vmalloc"); 3655a10aa579SChristoph Lameter 3656a10aa579SChristoph Lameter if (v->flags & VM_MAP) 3657f4527c90SFabian Frederick seq_puts(m, " vmap"); 3658a10aa579SChristoph Lameter 3659a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 3660f4527c90SFabian Frederick seq_puts(m, " user"); 3661a10aa579SChristoph Lameter 3662fe9041c2SChristoph Hellwig if (v->flags & VM_DMA_COHERENT) 3663fe9041c2SChristoph Hellwig seq_puts(m, " dma-coherent"); 3664fe9041c2SChristoph Hellwig 3665244d63eeSDavid Rientjes if (is_vmalloc_addr(v->pages)) 3666f4527c90SFabian Frederick seq_puts(m, " vpages"); 3667a10aa579SChristoph Lameter 3668a47a126aSEric Dumazet show_numa_info(m, v); 3669a10aa579SChristoph Lameter seq_putc(m, '\n'); 3670dd3b8353SUladzislau Rezki (Sony) 3671dd3b8353SUladzislau Rezki (Sony) /* 3672dd3b8353SUladzislau Rezki (Sony) * As a final step, dump "unpurged" areas. Note that the 3673dd3b8353SUladzislau Rezki (Sony) * entire "/proc/vmallocinfo" output will not 3674dd3b8353SUladzislau Rezki (Sony) * be address-sorted, because the purge list is not 3675dd3b8353SUladzislau Rezki (Sony) * sorted.
3676dd3b8353SUladzislau Rezki (Sony) */ 3677dd3b8353SUladzislau Rezki (Sony) if (list_is_last(&va->list, &vmap_area_list)) 3678dd3b8353SUladzislau Rezki (Sony) show_purge_info(m); 3679dd3b8353SUladzislau Rezki (Sony) 3680a10aa579SChristoph Lameter return 0; 3681a10aa579SChristoph Lameter } 3682a10aa579SChristoph Lameter 36835f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = { 3684a10aa579SChristoph Lameter .start = s_start, 3685a10aa579SChristoph Lameter .next = s_next, 3686a10aa579SChristoph Lameter .stop = s_stop, 3687a10aa579SChristoph Lameter .show = s_show, 3688a10aa579SChristoph Lameter }; 36895f6a6a9cSAlexey Dobriyan 36905f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 36915f6a6a9cSAlexey Dobriyan { 3692fddda2b7SChristoph Hellwig if (IS_ENABLED(CONFIG_NUMA)) 36930825a6f9SJoe Perches proc_create_seq_private("vmallocinfo", 0400, NULL, 369444414d82SChristoph Hellwig &vmalloc_op, 369544414d82SChristoph Hellwig nr_node_ids * sizeof(unsigned int), NULL); 3696fddda2b7SChristoph Hellwig else 36970825a6f9SJoe Perches proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op); 36985f6a6a9cSAlexey Dobriyan return 0; 36995f6a6a9cSAlexey Dobriyan } 37005f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 3701db3808c1SJoonsoo Kim 3702a10aa579SChristoph Lameter #endif 3703