/*
 * linux/mm/vmalloc.c
 *
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

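/*
 * Note: vfree() can be called from contexts that must not sleep, e.g.
 * interrupt context. In that case the memory to be freed is queued on
 * this per-CPU llist and actually released later, from process context,
 * by free_work().
 */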
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_clear_huge(p4d))
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

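/*
 * Clear the kernel page table entries in [addr, end). No TLB flush is
 * done here; callers are responsible for flushing the TLB afterwards,
 * e.g. via flush_tlb_kernel_range().
 */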
static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

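/*
 * Note on return values: vmap_page_range_noflush() and vmap_page_range()
 * return the number of pages mapped on success, or a negative errno if
 * setting up the page tables failed.
 */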
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Called by the augmented rbtree callbacks when a node is
 * removed or the tree is rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size,
	compute_subtree_max_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

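/*
 * Look up the vmap_area that contains @addr in the "busy" tree.
 * The caller is expected to hold vmap_area_lock.
 */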
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the parent node and the link (left or right
 * child pointer) under which the new node should be attached, for
 * further processing.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction
	 * ("link"), to which the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checking.
		 * Trigger the BUG() if the new VA partially or fully
		 * overlaps an existing one.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else
			BUG();
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Just perform a simple insertion into the tree. We do
		 * not set va->subtree_max_size to its current size before
		 * calling rb_insert_augmented(), because we populate the
		 * tree from the bottom up to the parent levels only once
		 * the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * and let augment_tree_propagate_from() put everything into
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	/*
	 * During merging a VA node can be empty, therefore
	 * not linked with the tree nor list. Just check it.
	 */
	if (!RB_EMPTY_NODE(&va->rb_node)) {
		if (root == &free_vmap_area_root)
			rb_erase_augmented(&va->rb_node,
				root, &free_vmap_area_rb_augment_cb);
		else
			rb_erase(&va->rb_node, root);

		list_del(&va->list);
		RB_CLEAR_NODE(&va->rb_node);
	}
}

/*
 * This function populates subtree_max_size from the bottom to the upper
 * levels, starting from the VA point. The propagation must be done when
 * a VA size is modified by changing its va_start/va_end, or when a VA
 * is newly inserted into the tree.
 *
 * It means that augment_tree_propagate_from() must be called:
 * - after a VA has been inserted into the tree (free path);
 * - after a VA has been shrunk (allocation path);
 * - after a VA has been grown (merging path).
 *
 * Please note that it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example, if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1 only
 * its subtree_max_size is updated, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	struct rb_node *node = &va->rb_node;
	unsigned long new_va_sub_max_size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);
		new_va_sub_max_size = compute_subtree_max_size(va);

		/*
		 * If the newly calculated maximum available size of the
		 * subtree is equal to the current one, then it means that
		 * the tree is propagated correctly. So we have to stop at
		 * this point to save cycles.
		 */
		if (va->subtree_max_size == new_va_sub_max_size)
			break;

		va->subtree_max_size = new_va_sub_max_size;
		node = rb_parent(&va->rb_node);
	}
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	link_va(va, root, parent, link, head);
	augment_tree_propagate_from(va);
}

/*
 * Merge a de-allocated chunk of VA memory with the previous and next
 * free blocks. If coalescing is not possible, a new free area is
 * inserted. If the VA has been merged, it is freed.
 */
static __always_inline void
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			/* Remove this VA, it has been merged. */
			unlink_va(va, root);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 *                  start            end
	 *                  |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			sibling->va_end = va->va_end;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			/* Remove this VA, it has been merged. */
			unlink_va(va, root);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			return;
		}
	}

insert:
	if (!merged) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

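/*
 * Example (illustrative): assume the free list already contains
 * [0x2000, 0x3000) and [0x4000, 0x6000), and [0x3000, 0x4000) is being
 * freed. The "next" check above extends the following block down to
 * 0x3000, and the "prev" check then extends [0x2000, 0x3000) up to
 * 0x6000, returning the intermediate vmap_area objects to
 * vmap_area_cachep. The result is one free block [0x2000, 0x6000).
 */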
static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that satisfies the request described by the passed parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * It does not make sense to go deeper into the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * only once due to the "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

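/*
 * Illustrative example of the fit types: for a free block
 * [0x1000, 0x9000) and an allocation of size 0x2000,
 *  - nva_start_addr == 0x1000 with size 0x8000 is a "full fit",
 *  - nva_start_addr == 0x1000 is a "left edge fit",
 *  - nva_start_addr == 0x7000 is a "right edge fit",
 *  - nva_start_addr == 0x4000 is a "no edge fit" that splits the
 *    block into two remaining free parts.
 */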
static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}

static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
		if (unlikely(!lva))
			return -1;

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (type == NE_FIT_TYPE)
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}

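/*
 * Note: the NE_FIT_TYPE case above allocates the second vmap_area with
 * GFP_NOWAIT because this path runs under vmap_area_lock (a spinlock)
 * and therefore must not sleep.
 */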
*/ 922*68ad4a33SUladzislau Rezki (Sony) type = classify_va_fit_type(va, nva_start_addr, size); 923*68ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(type == NOTHING_FIT)) 924*68ad4a33SUladzislau Rezki (Sony) return vend; 925*68ad4a33SUladzislau Rezki (Sony) 926*68ad4a33SUladzislau Rezki (Sony) /* Update the free vmap_area. */ 927*68ad4a33SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(va, nva_start_addr, size, type); 928*68ad4a33SUladzislau Rezki (Sony) if (ret) 929*68ad4a33SUladzislau Rezki (Sony) return vend; 930*68ad4a33SUladzislau Rezki (Sony) 931*68ad4a33SUladzislau Rezki (Sony) return nva_start_addr; 932*68ad4a33SUladzislau Rezki (Sony) } 9334da56b99SChris Wilson 934db64fe02SNick Piggin /* 935db64fe02SNick Piggin * Allocate a region of KVA of the specified size and alignment, within the 936db64fe02SNick Piggin * vstart and vend. 937db64fe02SNick Piggin */ 938db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size, 939db64fe02SNick Piggin unsigned long align, 940db64fe02SNick Piggin unsigned long vstart, unsigned long vend, 941db64fe02SNick Piggin int node, gfp_t gfp_mask) 942db64fe02SNick Piggin { 943db64fe02SNick Piggin struct vmap_area *va; 9441da177e4SLinus Torvalds unsigned long addr; 945db64fe02SNick Piggin int purged = 0; 946db64fe02SNick Piggin 9477766970cSNick Piggin BUG_ON(!size); 948891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 94989699605SNick Piggin BUG_ON(!is_power_of_2(align)); 950db64fe02SNick Piggin 951*68ad4a33SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized)) 952*68ad4a33SUladzislau Rezki (Sony) return ERR_PTR(-EBUSY); 953*68ad4a33SUladzislau Rezki (Sony) 9545803ed29SChristoph Hellwig might_sleep(); 9554da56b99SChris Wilson 956*68ad4a33SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, 957db64fe02SNick Piggin gfp_mask & GFP_RECLAIM_MASK, node); 958db64fe02SNick Piggin if (unlikely(!va)) 959db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 960db64fe02SNick Piggin 9617f88f88fSCatalin Marinas /* 9627f88f88fSCatalin Marinas * Only scan the relevant parts containing pointers to other objects 9637f88f88fSCatalin Marinas * to avoid false negatives. 9647f88f88fSCatalin Marinas */ 9657f88f88fSCatalin Marinas kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); 9667f88f88fSCatalin Marinas 967db64fe02SNick Piggin retry: 968db64fe02SNick Piggin spin_lock(&vmap_area_lock); 969*68ad4a33SUladzislau Rezki (Sony) 97089699605SNick Piggin /* 971*68ad4a33SUladzislau Rezki (Sony) * If an allocation fails, the "vend" address is 972*68ad4a33SUladzislau Rezki (Sony) * returned. Therefore trigger the overflow path. 
97389699605SNick Piggin */ 974*68ad4a33SUladzislau Rezki (Sony) addr = __alloc_vmap_area(size, align, vstart, vend, node); 975*68ad4a33SUladzislau Rezki (Sony) if (unlikely(addr == vend)) 97689699605SNick Piggin goto overflow; 97789699605SNick Piggin 97889699605SNick Piggin va->va_start = addr; 97989699605SNick Piggin va->va_end = addr + size; 98089699605SNick Piggin va->flags = 0; 981*68ad4a33SUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 982*68ad4a33SUladzislau Rezki (Sony) 98389699605SNick Piggin spin_unlock(&vmap_area_lock); 98489699605SNick Piggin 98561e16557SWang Xiaoqiang BUG_ON(!IS_ALIGNED(va->va_start, align)); 98689699605SNick Piggin BUG_ON(va->va_start < vstart); 98789699605SNick Piggin BUG_ON(va->va_end > vend); 98889699605SNick Piggin 98989699605SNick Piggin return va; 99089699605SNick Piggin 9917766970cSNick Piggin overflow: 992db64fe02SNick Piggin spin_unlock(&vmap_area_lock); 993db64fe02SNick Piggin if (!purged) { 994db64fe02SNick Piggin purge_vmap_area_lazy(); 995db64fe02SNick Piggin purged = 1; 996db64fe02SNick Piggin goto retry; 997db64fe02SNick Piggin } 9984da56b99SChris Wilson 9994da56b99SChris Wilson if (gfpflags_allow_blocking(gfp_mask)) { 10004da56b99SChris Wilson unsigned long freed = 0; 10014da56b99SChris Wilson blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 10024da56b99SChris Wilson if (freed > 0) { 10034da56b99SChris Wilson purged = 0; 10044da56b99SChris Wilson goto retry; 10054da56b99SChris Wilson } 10064da56b99SChris Wilson } 10074da56b99SChris Wilson 100803497d76SFlorian Fainelli if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 1009756a025fSJoe Perches pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 1010756a025fSJoe Perches size); 1011*68ad4a33SUladzislau Rezki (Sony) 1012*68ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 1013db64fe02SNick Piggin return ERR_PTR(-EBUSY); 1014db64fe02SNick Piggin } 1015db64fe02SNick Piggin 10164da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb) 10174da56b99SChris Wilson { 10184da56b99SChris Wilson return blocking_notifier_chain_register(&vmap_notify_list, nb); 10194da56b99SChris Wilson } 10204da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 10214da56b99SChris Wilson 10224da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb) 10234da56b99SChris Wilson { 10244da56b99SChris Wilson return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 10254da56b99SChris Wilson } 10264da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 10274da56b99SChris Wilson 1028db64fe02SNick Piggin static void __free_vmap_area(struct vmap_area *va) 1029db64fe02SNick Piggin { 1030db64fe02SNick Piggin BUG_ON(RB_EMPTY_NODE(&va->rb_node)); 103189699605SNick Piggin 103289699605SNick Piggin /* 1033*68ad4a33SUladzislau Rezki (Sony) * Remove from the busy tree/list. 103489699605SNick Piggin */ 1035*68ad4a33SUladzislau Rezki (Sony) unlink_va(va, &vmap_area_root); 1036db64fe02SNick Piggin 1037ca23e405STejun Heo /* 1038*68ad4a33SUladzislau Rezki (Sony) * Merge VA with its neighbors, otherwise just add it. 
1039ca23e405STejun Heo */ 1040*68ad4a33SUladzislau Rezki (Sony) merge_or_add_vmap_area(va, 1041*68ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, &free_vmap_area_list); 1042db64fe02SNick Piggin } 1043db64fe02SNick Piggin 1044db64fe02SNick Piggin /* 1045db64fe02SNick Piggin * Free a region of KVA allocated by alloc_vmap_area 1046db64fe02SNick Piggin */ 1047db64fe02SNick Piggin static void free_vmap_area(struct vmap_area *va) 1048db64fe02SNick Piggin { 1049db64fe02SNick Piggin spin_lock(&vmap_area_lock); 1050db64fe02SNick Piggin __free_vmap_area(va); 1051db64fe02SNick Piggin spin_unlock(&vmap_area_lock); 1052db64fe02SNick Piggin } 1053db64fe02SNick Piggin 1054db64fe02SNick Piggin /* 1055db64fe02SNick Piggin * Clear the pagetable entries of a given vmap_area 1056db64fe02SNick Piggin */ 1057db64fe02SNick Piggin static void unmap_vmap_area(struct vmap_area *va) 1058db64fe02SNick Piggin { 1059db64fe02SNick Piggin vunmap_page_range(va->va_start, va->va_end); 1060db64fe02SNick Piggin } 1061db64fe02SNick Piggin 1062db64fe02SNick Piggin /* 1063db64fe02SNick Piggin * lazy_max_pages is the maximum amount of virtual address space we gather up 1064db64fe02SNick Piggin * before attempting to purge with a TLB flush. 1065db64fe02SNick Piggin * 1066db64fe02SNick Piggin * There is a tradeoff here: a larger number will cover more kernel page tables 1067db64fe02SNick Piggin * and take slightly longer to purge, but it will linearly reduce the number of 1068db64fe02SNick Piggin * global TLB flushes that must be performed. It would seem natural to scale 1069db64fe02SNick Piggin * this number up linearly with the number of CPUs (because vmapping activity 1070db64fe02SNick Piggin * could also scale linearly with the number of CPUs), however it is likely 1071db64fe02SNick Piggin * that in practice, workloads might be constrained in other ways that mean 1072db64fe02SNick Piggin * vmap activity will not scale linearly with CPUs. Also, I want to be 1073db64fe02SNick Piggin * conservative and not introduce a big latency on huge systems, so go with 1074db64fe02SNick Piggin * a less aggressive log scale. It will still be an improvement over the old 1075db64fe02SNick Piggin * code, and it will be simple to change the scale factor if we find that it 1076db64fe02SNick Piggin * becomes a problem on bigger systems. 1077db64fe02SNick Piggin */ 1078db64fe02SNick Piggin static unsigned long lazy_max_pages(void) 1079db64fe02SNick Piggin { 1080db64fe02SNick Piggin unsigned int log; 1081db64fe02SNick Piggin 1082db64fe02SNick Piggin log = fls(num_online_cpus()); 1083db64fe02SNick Piggin 1084db64fe02SNick Piggin return log * (32UL * 1024 * 1024 / PAGE_SIZE); 1085db64fe02SNick Piggin } 1086db64fe02SNick Piggin 10874d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); 1088db64fe02SNick Piggin 10890574ecd1SChristoph Hellwig /* 10900574ecd1SChristoph Hellwig * Serialize vmap purging. There is no actual criticial section protected 10910574ecd1SChristoph Hellwig * by this look, but we want to avoid concurrent calls for performance 10920574ecd1SChristoph Hellwig * reasons and to make the pcpu_get_vm_areas more deterministic. 
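 * A mutex (rather than a spinlock) is used because the purge path may
 * reschedule while it walks the purge list, via cond_resched_lock().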
10930574ecd1SChristoph Hellwig */ 1094f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock); 10950574ecd1SChristoph Hellwig 109602b709dfSNick Piggin /* for per-CPU blocks */ 109702b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void); 109802b709dfSNick Piggin 1099db64fe02SNick Piggin /* 11003ee48b6aSCliff Wickman * called before a call to iounmap() if the caller wants vm_area_struct's 11013ee48b6aSCliff Wickman * immediately freed. 11023ee48b6aSCliff Wickman */ 11033ee48b6aSCliff Wickman void set_iounmap_nonlazy(void) 11043ee48b6aSCliff Wickman { 11054d36e6f8SUladzislau Rezki (Sony) atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1); 11063ee48b6aSCliff Wickman } 11073ee48b6aSCliff Wickman 11083ee48b6aSCliff Wickman /* 1109db64fe02SNick Piggin * Purges all lazily-freed vmap areas. 1110db64fe02SNick Piggin */ 11110574ecd1SChristoph Hellwig static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) 1112db64fe02SNick Piggin { 11134d36e6f8SUladzislau Rezki (Sony) unsigned long resched_threshold; 111480c4bd7aSChris Wilson struct llist_node *valist; 1115db64fe02SNick Piggin struct vmap_area *va; 1116cbb76676SVegard Nossum struct vmap_area *n_va; 1117db64fe02SNick Piggin 11180574ecd1SChristoph Hellwig lockdep_assert_held(&vmap_purge_lock); 111902b709dfSNick Piggin 112080c4bd7aSChris Wilson valist = llist_del_all(&vmap_purge_list); 112168571be9SUladzislau Rezki (Sony) if (unlikely(valist == NULL)) 112268571be9SUladzislau Rezki (Sony) return false; 112368571be9SUladzislau Rezki (Sony) 112468571be9SUladzislau Rezki (Sony) /* 112568571be9SUladzislau Rezki (Sony) * TODO: to calculate a flush range without looping. 112668571be9SUladzislau Rezki (Sony) * The list can be up to lazy_max_pages() elements. 112768571be9SUladzislau Rezki (Sony) */ 112880c4bd7aSChris Wilson llist_for_each_entry(va, valist, purge_list) { 11290574ecd1SChristoph Hellwig if (va->va_start < start) 11300574ecd1SChristoph Hellwig start = va->va_start; 11310574ecd1SChristoph Hellwig if (va->va_end > end) 11320574ecd1SChristoph Hellwig end = va->va_end; 1133db64fe02SNick Piggin } 1134db64fe02SNick Piggin 11350574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 11364d36e6f8SUladzislau Rezki (Sony) resched_threshold = lazy_max_pages() << 1; 1137db64fe02SNick Piggin 1138db64fe02SNick Piggin spin_lock(&vmap_area_lock); 1139763b218dSJoel Fernandes llist_for_each_entry_safe(va, n_va, valist, purge_list) { 11404d36e6f8SUladzislau Rezki (Sony) unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; 1141763b218dSJoel Fernandes 1142db64fe02SNick Piggin __free_vmap_area(va); 11434d36e6f8SUladzislau Rezki (Sony) atomic_long_sub(nr, &vmap_lazy_nr); 114468571be9SUladzislau Rezki (Sony) 11454d36e6f8SUladzislau Rezki (Sony) if (atomic_long_read(&vmap_lazy_nr) < resched_threshold) 1146763b218dSJoel Fernandes cond_resched_lock(&vmap_area_lock); 1147763b218dSJoel Fernandes } 1148db64fe02SNick Piggin spin_unlock(&vmap_area_lock); 11490574ecd1SChristoph Hellwig return true; 1150db64fe02SNick Piggin } 1151db64fe02SNick Piggin 1152db64fe02SNick Piggin /* 1153496850e5SNick Piggin * Kick off a purge of the outstanding lazy areas. Don't bother if somebody 1154496850e5SNick Piggin * is already purging. 
1155496850e5SNick Piggin */ 1156496850e5SNick Piggin static void try_purge_vmap_area_lazy(void) 1157496850e5SNick Piggin { 1158f9e09977SChristoph Hellwig if (mutex_trylock(&vmap_purge_lock)) { 11590574ecd1SChristoph Hellwig __purge_vmap_area_lazy(ULONG_MAX, 0); 1160f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 11610574ecd1SChristoph Hellwig } 1162496850e5SNick Piggin } 1163496850e5SNick Piggin 1164496850e5SNick Piggin /* 1165db64fe02SNick Piggin * Kick off a purge of the outstanding lazy areas. 1166db64fe02SNick Piggin */ 1167db64fe02SNick Piggin static void purge_vmap_area_lazy(void) 1168db64fe02SNick Piggin { 1169f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 11700574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 11710574ecd1SChristoph Hellwig __purge_vmap_area_lazy(ULONG_MAX, 0); 1172f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 1173db64fe02SNick Piggin } 1174db64fe02SNick Piggin 1175db64fe02SNick Piggin /* 117664141da5SJeremy Fitzhardinge * Free a vmap area, caller ensuring that the area has been unmapped 117764141da5SJeremy Fitzhardinge * and flush_cache_vunmap had been called for the correct range 117864141da5SJeremy Fitzhardinge * previously. 1179db64fe02SNick Piggin */ 118064141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va) 1181db64fe02SNick Piggin { 11824d36e6f8SUladzislau Rezki (Sony) unsigned long nr_lazy; 118380c4bd7aSChris Wilson 11844d36e6f8SUladzislau Rezki (Sony) nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> 11854d36e6f8SUladzislau Rezki (Sony) PAGE_SHIFT, &vmap_lazy_nr); 118680c4bd7aSChris Wilson 118780c4bd7aSChris Wilson /* After this point, we may free va at any time */ 118880c4bd7aSChris Wilson llist_add(&va->purge_list, &vmap_purge_list); 118980c4bd7aSChris Wilson 119080c4bd7aSChris Wilson if (unlikely(nr_lazy > lazy_max_pages())) 1191496850e5SNick Piggin try_purge_vmap_area_lazy(); 1192db64fe02SNick Piggin } 1193db64fe02SNick Piggin 1194b29acbdcSNick Piggin /* 1195b29acbdcSNick Piggin * Free and unmap a vmap area 1196b29acbdcSNick Piggin */ 1197b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va) 1198b29acbdcSNick Piggin { 1199b29acbdcSNick Piggin flush_cache_vunmap(va->va_start, va->va_end); 1200c8eef01eSChristoph Hellwig unmap_vmap_area(va); 120182a2e924SChintan Pandya if (debug_pagealloc_enabled()) 120282a2e924SChintan Pandya flush_tlb_kernel_range(va->va_start, va->va_end); 120382a2e924SChintan Pandya 1204c8eef01eSChristoph Hellwig free_vmap_area_noflush(va); 1205b29acbdcSNick Piggin } 1206b29acbdcSNick Piggin 1207db64fe02SNick Piggin static struct vmap_area *find_vmap_area(unsigned long addr) 1208db64fe02SNick Piggin { 1209db64fe02SNick Piggin struct vmap_area *va; 1210db64fe02SNick Piggin 1211db64fe02SNick Piggin spin_lock(&vmap_area_lock); 1212db64fe02SNick Piggin va = __find_vmap_area(addr); 1213db64fe02SNick Piggin spin_unlock(&vmap_area_lock); 1214db64fe02SNick Piggin 1215db64fe02SNick Piggin return va; 1216db64fe02SNick Piggin } 1217db64fe02SNick Piggin 1218db64fe02SNick Piggin /*** Per cpu kva allocator ***/ 1219db64fe02SNick Piggin 1220db64fe02SNick Piggin /* 1221db64fe02SNick Piggin * vmap space is limited especially on 32 bit architectures. Ensure there is 1222db64fe02SNick Piggin * room for at least 16 percpu vmap blocks per CPU. 
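 * For example (illustrative numbers only): with 4K pages on a 64-bit
 * build, the guessed VMALLOC_SPACE of 128GB gives 32M vmap pages; with
 * NR_CPUS == 64 that allows 32M / 64 / 16 = 32768 pages per block, which
 * the clamp below caps at VMAP_BBMAP_BITS_MAX (1024), i.e. a 4MB block.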
1223db64fe02SNick Piggin */ 1224db64fe02SNick Piggin /* 1225db64fe02SNick Piggin * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 1226db64fe02SNick Piggin * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 1227db64fe02SNick Piggin * instead (we just need a rough idea) 1228db64fe02SNick Piggin */ 1229db64fe02SNick Piggin #if BITS_PER_LONG == 32 1230db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024) 1231db64fe02SNick Piggin #else 1232db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024*1024) 1233db64fe02SNick Piggin #endif 1234db64fe02SNick Piggin 1235db64fe02SNick Piggin #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 1236db64fe02SNick Piggin #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 1237db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 1238db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 1239db64fe02SNick Piggin #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 1240db64fe02SNick Piggin #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 1241f982f915SClemens Ladisch #define VMAP_BBMAP_BITS \ 1242f982f915SClemens Ladisch VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 1243db64fe02SNick Piggin VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 1244f982f915SClemens Ladisch VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 1245db64fe02SNick Piggin 1246db64fe02SNick Piggin #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 1247db64fe02SNick Piggin 1248db64fe02SNick Piggin struct vmap_block_queue { 1249db64fe02SNick Piggin spinlock_t lock; 1250db64fe02SNick Piggin struct list_head free; 1251db64fe02SNick Piggin }; 1252db64fe02SNick Piggin 1253db64fe02SNick Piggin struct vmap_block { 1254db64fe02SNick Piggin spinlock_t lock; 1255db64fe02SNick Piggin struct vmap_area *va; 1256db64fe02SNick Piggin unsigned long free, dirty; 12577d61bfe8SRoman Pen unsigned long dirty_min, dirty_max; /*< dirty range */ 1258db64fe02SNick Piggin struct list_head free_list; 1259db64fe02SNick Piggin struct rcu_head rcu_head; 126002b709dfSNick Piggin struct list_head purge; 1261db64fe02SNick Piggin }; 1262db64fe02SNick Piggin 1263db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 1264db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 1265db64fe02SNick Piggin 1266db64fe02SNick Piggin /* 1267db64fe02SNick Piggin * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block 1268db64fe02SNick Piggin * in the free path. Could get rid of this if we change the API to return a 1269db64fe02SNick Piggin * "cookie" from alloc, to be passed to free. But no big deal yet. 1270db64fe02SNick Piggin */ 1271db64fe02SNick Piggin static DEFINE_SPINLOCK(vmap_block_tree_lock); 1272db64fe02SNick Piggin static RADIX_TREE(vmap_block_tree, GFP_ATOMIC); 1273db64fe02SNick Piggin 1274db64fe02SNick Piggin /* 1275db64fe02SNick Piggin * We should probably have a fallback mechanism to allocate virtual memory 1276db64fe02SNick Piggin * out of partially filled vmap blocks. However vmap block sizing should be 1277db64fe02SNick Piggin * fairly reasonable according to the vmalloc size, so it shouldn't be a 1278db64fe02SNick Piggin * big problem. 
1279db64fe02SNick Piggin */ 1280db64fe02SNick Piggin 1281db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr) 1282db64fe02SNick Piggin { 1283db64fe02SNick Piggin addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 1284db64fe02SNick Piggin addr /= VMAP_BLOCK_SIZE; 1285db64fe02SNick Piggin return addr; 1286db64fe02SNick Piggin } 1287db64fe02SNick Piggin 1288cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 1289cf725ce2SRoman Pen { 1290cf725ce2SRoman Pen unsigned long addr; 1291cf725ce2SRoman Pen 1292cf725ce2SRoman Pen addr = va_start + (pages_off << PAGE_SHIFT); 1293cf725ce2SRoman Pen BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 1294cf725ce2SRoman Pen return (void *)addr; 1295cf725ce2SRoman Pen } 1296cf725ce2SRoman Pen 1297cf725ce2SRoman Pen /** 1298cf725ce2SRoman Pen * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 1299cf725ce2SRoman Pen * block. Of course pages number can't exceed VMAP_BBMAP_BITS 1300cf725ce2SRoman Pen * @order: how many 2^order pages should be occupied in newly allocated block 1301cf725ce2SRoman Pen * @gfp_mask: flags for the page level allocator 1302cf725ce2SRoman Pen * 1303a862f68aSMike Rapoport * Return: virtual address in a newly allocated block or ERR_PTR(-errno) 1304cf725ce2SRoman Pen */ 1305cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 1306db64fe02SNick Piggin { 1307db64fe02SNick Piggin struct vmap_block_queue *vbq; 1308db64fe02SNick Piggin struct vmap_block *vb; 1309db64fe02SNick Piggin struct vmap_area *va; 1310db64fe02SNick Piggin unsigned long vb_idx; 1311db64fe02SNick Piggin int node, err; 1312cf725ce2SRoman Pen void *vaddr; 1313db64fe02SNick Piggin 1314db64fe02SNick Piggin node = numa_node_id(); 1315db64fe02SNick Piggin 1316db64fe02SNick Piggin vb = kmalloc_node(sizeof(struct vmap_block), 1317db64fe02SNick Piggin gfp_mask & GFP_RECLAIM_MASK, node); 1318db64fe02SNick Piggin if (unlikely(!vb)) 1319db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 1320db64fe02SNick Piggin 1321db64fe02SNick Piggin va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 1322db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, 1323db64fe02SNick Piggin node, gfp_mask); 1324ddf9c6d4STobias Klauser if (IS_ERR(va)) { 1325db64fe02SNick Piggin kfree(vb); 1326e7d86340SJulia Lawall return ERR_CAST(va); 1327db64fe02SNick Piggin } 1328db64fe02SNick Piggin 1329db64fe02SNick Piggin err = radix_tree_preload(gfp_mask); 1330db64fe02SNick Piggin if (unlikely(err)) { 1331db64fe02SNick Piggin kfree(vb); 1332db64fe02SNick Piggin free_vmap_area(va); 1333db64fe02SNick Piggin return ERR_PTR(err); 1334db64fe02SNick Piggin } 1335db64fe02SNick Piggin 1336cf725ce2SRoman Pen vaddr = vmap_block_vaddr(va->va_start, 0); 1337db64fe02SNick Piggin spin_lock_init(&vb->lock); 1338db64fe02SNick Piggin vb->va = va; 1339cf725ce2SRoman Pen /* At least something should be left free */ 1340cf725ce2SRoman Pen BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 1341cf725ce2SRoman Pen vb->free = VMAP_BBMAP_BITS - (1UL << order); 1342db64fe02SNick Piggin vb->dirty = 0; 13437d61bfe8SRoman Pen vb->dirty_min = VMAP_BBMAP_BITS; 13447d61bfe8SRoman Pen vb->dirty_max = 0; 1345db64fe02SNick Piggin INIT_LIST_HEAD(&vb->free_list); 1346db64fe02SNick Piggin 1347db64fe02SNick Piggin vb_idx = addr_to_vb_idx(va->va_start); 1348db64fe02SNick Piggin spin_lock(&vmap_block_tree_lock); 1349db64fe02SNick Piggin err = radix_tree_insert(&vmap_block_tree, vb_idx, vb); 1350db64fe02SNick Piggin 
spin_unlock(&vmap_block_tree_lock); 1351db64fe02SNick Piggin BUG_ON(err); 1352db64fe02SNick Piggin radix_tree_preload_end(); 1353db64fe02SNick Piggin 1354db64fe02SNick Piggin vbq = &get_cpu_var(vmap_block_queue); 1355db64fe02SNick Piggin spin_lock(&vbq->lock); 135668ac546fSRoman Pen list_add_tail_rcu(&vb->free_list, &vbq->free); 1357db64fe02SNick Piggin spin_unlock(&vbq->lock); 13583f04ba85STejun Heo put_cpu_var(vmap_block_queue); 1359db64fe02SNick Piggin 1360cf725ce2SRoman Pen return vaddr; 1361db64fe02SNick Piggin } 1362db64fe02SNick Piggin 1363db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb) 1364db64fe02SNick Piggin { 1365db64fe02SNick Piggin struct vmap_block *tmp; 1366db64fe02SNick Piggin unsigned long vb_idx; 1367db64fe02SNick Piggin 1368db64fe02SNick Piggin vb_idx = addr_to_vb_idx(vb->va->va_start); 1369db64fe02SNick Piggin spin_lock(&vmap_block_tree_lock); 1370db64fe02SNick Piggin tmp = radix_tree_delete(&vmap_block_tree, vb_idx); 1371db64fe02SNick Piggin spin_unlock(&vmap_block_tree_lock); 1372db64fe02SNick Piggin BUG_ON(tmp != vb); 1373db64fe02SNick Piggin 137464141da5SJeremy Fitzhardinge free_vmap_area_noflush(vb->va); 137522a3c7d1SLai Jiangshan kfree_rcu(vb, rcu_head); 1376db64fe02SNick Piggin } 1377db64fe02SNick Piggin 137802b709dfSNick Piggin static void purge_fragmented_blocks(int cpu) 137902b709dfSNick Piggin { 138002b709dfSNick Piggin LIST_HEAD(purge); 138102b709dfSNick Piggin struct vmap_block *vb; 138202b709dfSNick Piggin struct vmap_block *n_vb; 138302b709dfSNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 138402b709dfSNick Piggin 138502b709dfSNick Piggin rcu_read_lock(); 138602b709dfSNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 138702b709dfSNick Piggin 138802b709dfSNick Piggin if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) 138902b709dfSNick Piggin continue; 139002b709dfSNick Piggin 139102b709dfSNick Piggin spin_lock(&vb->lock); 139202b709dfSNick Piggin if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { 139302b709dfSNick Piggin vb->free = 0; /* prevent further allocs after releasing lock */ 139402b709dfSNick Piggin vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ 13957d61bfe8SRoman Pen vb->dirty_min = 0; 13967d61bfe8SRoman Pen vb->dirty_max = VMAP_BBMAP_BITS; 139702b709dfSNick Piggin spin_lock(&vbq->lock); 139802b709dfSNick Piggin list_del_rcu(&vb->free_list); 139902b709dfSNick Piggin spin_unlock(&vbq->lock); 140002b709dfSNick Piggin spin_unlock(&vb->lock); 140102b709dfSNick Piggin list_add_tail(&vb->purge, &purge); 140202b709dfSNick Piggin } else 140302b709dfSNick Piggin spin_unlock(&vb->lock); 140402b709dfSNick Piggin } 140502b709dfSNick Piggin rcu_read_unlock(); 140602b709dfSNick Piggin 140702b709dfSNick Piggin list_for_each_entry_safe(vb, n_vb, &purge, purge) { 140802b709dfSNick Piggin list_del(&vb->purge); 140902b709dfSNick Piggin free_vmap_block(vb); 141002b709dfSNick Piggin } 141102b709dfSNick Piggin } 141202b709dfSNick Piggin 141302b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void) 141402b709dfSNick Piggin { 141502b709dfSNick Piggin int cpu; 141602b709dfSNick Piggin 141702b709dfSNick Piggin for_each_possible_cpu(cpu) 141802b709dfSNick Piggin purge_fragmented_blocks(cpu); 141902b709dfSNick Piggin } 142002b709dfSNick Piggin 1421db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 1422db64fe02SNick Piggin { 1423db64fe02SNick Piggin struct vmap_block_queue *vbq; 1424db64fe02SNick 
Piggin struct vmap_block *vb; 1425cf725ce2SRoman Pen void *vaddr = NULL; 1426db64fe02SNick Piggin unsigned int order; 1427db64fe02SNick Piggin 1428891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 1429db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 1430aa91c4d8SJan Kara if (WARN_ON(size == 0)) { 1431aa91c4d8SJan Kara /* 1432aa91c4d8SJan Kara * Allocating 0 bytes isn't what caller wants since 1433aa91c4d8SJan Kara * get_order(0) returns funny result. Just warn and terminate 1434aa91c4d8SJan Kara * early. 1435aa91c4d8SJan Kara */ 1436aa91c4d8SJan Kara return NULL; 1437aa91c4d8SJan Kara } 1438db64fe02SNick Piggin order = get_order(size); 1439db64fe02SNick Piggin 1440db64fe02SNick Piggin rcu_read_lock(); 1441db64fe02SNick Piggin vbq = &get_cpu_var(vmap_block_queue); 1442db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1443cf725ce2SRoman Pen unsigned long pages_off; 1444db64fe02SNick Piggin 1445db64fe02SNick Piggin spin_lock(&vb->lock); 1446cf725ce2SRoman Pen if (vb->free < (1UL << order)) { 1447cf725ce2SRoman Pen spin_unlock(&vb->lock); 1448cf725ce2SRoman Pen continue; 1449cf725ce2SRoman Pen } 145002b709dfSNick Piggin 1451cf725ce2SRoman Pen pages_off = VMAP_BBMAP_BITS - vb->free; 1452cf725ce2SRoman Pen vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 1453db64fe02SNick Piggin vb->free -= 1UL << order; 1454db64fe02SNick Piggin if (vb->free == 0) { 1455db64fe02SNick Piggin spin_lock(&vbq->lock); 1456de560423SNick Piggin list_del_rcu(&vb->free_list); 1457db64fe02SNick Piggin spin_unlock(&vbq->lock); 1458db64fe02SNick Piggin } 1459cf725ce2SRoman Pen 1460db64fe02SNick Piggin spin_unlock(&vb->lock); 1461db64fe02SNick Piggin break; 1462db64fe02SNick Piggin } 146302b709dfSNick Piggin 14643f04ba85STejun Heo put_cpu_var(vmap_block_queue); 1465db64fe02SNick Piggin rcu_read_unlock(); 1466db64fe02SNick Piggin 1467cf725ce2SRoman Pen /* Allocate new block if nothing was found */ 1468cf725ce2SRoman Pen if (!vaddr) 1469cf725ce2SRoman Pen vaddr = new_vmap_block(order, gfp_mask); 1470db64fe02SNick Piggin 1471cf725ce2SRoman Pen return vaddr; 1472db64fe02SNick Piggin } 1473db64fe02SNick Piggin 1474db64fe02SNick Piggin static void vb_free(const void *addr, unsigned long size) 1475db64fe02SNick Piggin { 1476db64fe02SNick Piggin unsigned long offset; 1477db64fe02SNick Piggin unsigned long vb_idx; 1478db64fe02SNick Piggin unsigned int order; 1479db64fe02SNick Piggin struct vmap_block *vb; 1480db64fe02SNick Piggin 1481891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 1482db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 1483b29acbdcSNick Piggin 1484b29acbdcSNick Piggin flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size); 1485b29acbdcSNick Piggin 1486db64fe02SNick Piggin order = get_order(size); 1487db64fe02SNick Piggin 1488db64fe02SNick Piggin offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1); 14897d61bfe8SRoman Pen offset >>= PAGE_SHIFT; 1490db64fe02SNick Piggin 1491db64fe02SNick Piggin vb_idx = addr_to_vb_idx((unsigned long)addr); 1492db64fe02SNick Piggin rcu_read_lock(); 1493db64fe02SNick Piggin vb = radix_tree_lookup(&vmap_block_tree, vb_idx); 1494db64fe02SNick Piggin rcu_read_unlock(); 1495db64fe02SNick Piggin BUG_ON(!vb); 1496db64fe02SNick Piggin 149764141da5SJeremy Fitzhardinge vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); 149864141da5SJeremy Fitzhardinge 149982a2e924SChintan Pandya if (debug_pagealloc_enabled()) 150082a2e924SChintan Pandya flush_tlb_kernel_range((unsigned long)addr, 
150182a2e924SChintan Pandya (unsigned long)addr + size); 150282a2e924SChintan Pandya 1503db64fe02SNick Piggin spin_lock(&vb->lock); 15047d61bfe8SRoman Pen 15057d61bfe8SRoman Pen /* Expand dirty range */ 15067d61bfe8SRoman Pen vb->dirty_min = min(vb->dirty_min, offset); 15077d61bfe8SRoman Pen vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 1508d086817dSMinChan Kim 1509db64fe02SNick Piggin vb->dirty += 1UL << order; 1510db64fe02SNick Piggin if (vb->dirty == VMAP_BBMAP_BITS) { 1511de560423SNick Piggin BUG_ON(vb->free); 1512db64fe02SNick Piggin spin_unlock(&vb->lock); 1513db64fe02SNick Piggin free_vmap_block(vb); 1514db64fe02SNick Piggin } else 1515db64fe02SNick Piggin spin_unlock(&vb->lock); 1516db64fe02SNick Piggin } 1517db64fe02SNick Piggin 1518868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 1519db64fe02SNick Piggin { 1520db64fe02SNick Piggin int cpu; 1521db64fe02SNick Piggin 15229b463334SJeremy Fitzhardinge if (unlikely(!vmap_initialized)) 15239b463334SJeremy Fitzhardinge return; 15249b463334SJeremy Fitzhardinge 15255803ed29SChristoph Hellwig might_sleep(); 15265803ed29SChristoph Hellwig 1527db64fe02SNick Piggin for_each_possible_cpu(cpu) { 1528db64fe02SNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 1529db64fe02SNick Piggin struct vmap_block *vb; 1530db64fe02SNick Piggin 1531db64fe02SNick Piggin rcu_read_lock(); 1532db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1533db64fe02SNick Piggin spin_lock(&vb->lock); 15347d61bfe8SRoman Pen if (vb->dirty) { 15357d61bfe8SRoman Pen unsigned long va_start = vb->va->va_start; 1536db64fe02SNick Piggin unsigned long s, e; 1537b136be5eSJoonsoo Kim 15387d61bfe8SRoman Pen s = va_start + (vb->dirty_min << PAGE_SHIFT); 15397d61bfe8SRoman Pen e = va_start + (vb->dirty_max << PAGE_SHIFT); 1540db64fe02SNick Piggin 15417d61bfe8SRoman Pen start = min(s, start); 15427d61bfe8SRoman Pen end = max(e, end); 15437d61bfe8SRoman Pen 1544db64fe02SNick Piggin flush = 1; 1545db64fe02SNick Piggin } 1546db64fe02SNick Piggin spin_unlock(&vb->lock); 1547db64fe02SNick Piggin } 1548db64fe02SNick Piggin rcu_read_unlock(); 1549db64fe02SNick Piggin } 1550db64fe02SNick Piggin 1551f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 15520574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 15530574ecd1SChristoph Hellwig if (!__purge_vmap_area_lazy(start, end) && flush) 15540574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 1555f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 1556db64fe02SNick Piggin } 1557868b104dSRick Edgecombe 1558868b104dSRick Edgecombe /** 1559868b104dSRick Edgecombe * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 1560868b104dSRick Edgecombe * 1561868b104dSRick Edgecombe * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 1562868b104dSRick Edgecombe * to amortize TLB flushing overheads. What this means is that any page you 1563868b104dSRick Edgecombe * have now, may, in a former life, have been mapped into kernel virtual 1564868b104dSRick Edgecombe * address by the vmap layer and so there might be some CPUs with TLB entries 1565868b104dSRick Edgecombe * still referencing that page (additional to the regular 1:1 kernel mapping). 1566868b104dSRick Edgecombe * 1567868b104dSRick Edgecombe * vm_unmap_aliases flushes all such lazy mappings. 
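 * (Within this file, the same flush is performed via _vm_unmap_aliases()
 * when VM_FLUSH_RESET_PERMS mappings are torn down in vm_remove_mappings().)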
After it returns, we can 1568868b104dSRick Edgecombe * be sure that none of the pages we have control over will have any aliases 1569868b104dSRick Edgecombe * from the vmap layer. 1570868b104dSRick Edgecombe */ 1571868b104dSRick Edgecombe void vm_unmap_aliases(void) 1572868b104dSRick Edgecombe { 1573868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 1574868b104dSRick Edgecombe int flush = 0; 1575868b104dSRick Edgecombe 1576868b104dSRick Edgecombe _vm_unmap_aliases(start, end, flush); 1577868b104dSRick Edgecombe } 1578db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases); 1579db64fe02SNick Piggin 1580db64fe02SNick Piggin /** 1581db64fe02SNick Piggin * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 1582db64fe02SNick Piggin * @mem: the pointer returned by vm_map_ram 1583db64fe02SNick Piggin * @count: the count passed to that vm_map_ram call (cannot unmap partial) 1584db64fe02SNick Piggin */ 1585db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count) 1586db64fe02SNick Piggin { 158765ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 1588db64fe02SNick Piggin unsigned long addr = (unsigned long)mem; 15899c3acf60SChristoph Hellwig struct vmap_area *va; 1590db64fe02SNick Piggin 15915803ed29SChristoph Hellwig might_sleep(); 1592db64fe02SNick Piggin BUG_ON(!addr); 1593db64fe02SNick Piggin BUG_ON(addr < VMALLOC_START); 1594db64fe02SNick Piggin BUG_ON(addr > VMALLOC_END); 1595a1c0b1a0SShawn Lin BUG_ON(!PAGE_ALIGNED(addr)); 1596db64fe02SNick Piggin 15979c3acf60SChristoph Hellwig if (likely(count <= VMAP_MAX_ALLOC)) { 159805e3ff95SChintan Pandya debug_check_no_locks_freed(mem, size); 1599db64fe02SNick Piggin vb_free(mem, size); 16009c3acf60SChristoph Hellwig return; 16019c3acf60SChristoph Hellwig } 16029c3acf60SChristoph Hellwig 16039c3acf60SChristoph Hellwig va = find_vmap_area(addr); 16049c3acf60SChristoph Hellwig BUG_ON(!va); 160505e3ff95SChintan Pandya debug_check_no_locks_freed((void *)va->va_start, 160605e3ff95SChintan Pandya (va->va_end - va->va_start)); 16079c3acf60SChristoph Hellwig free_unmap_vmap_area(va); 1608db64fe02SNick Piggin } 1609db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram); 1610db64fe02SNick Piggin 1611db64fe02SNick Piggin /** 1612db64fe02SNick Piggin * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 1613db64fe02SNick Piggin * @pages: an array of pointers to the pages to be mapped 1614db64fe02SNick Piggin * @count: number of pages 1615db64fe02SNick Piggin * @node: prefer to allocate data structures on this node 1616db64fe02SNick Piggin * @prot: memory protection to use. PAGE_KERNEL for regular RAM 1617e99c97adSRandy Dunlap * 161836437638SGioh Kim * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 161936437638SGioh Kim * faster than vmap so it's good. But if you mix long-life and short-life 162036437638SGioh Kim * objects with vm_map_ram(), it could consume lots of address space through 162136437638SGioh Kim * fragmentation (especially on a 32bit machine). You could see failures in 162236437638SGioh Kim * the end. Please use this function for short-lived objects. 
162336437638SGioh Kim * 1624e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure 1625db64fe02SNick Piggin */ 1626db64fe02SNick Piggin void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) 1627db64fe02SNick Piggin { 162865ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 1629db64fe02SNick Piggin unsigned long addr; 1630db64fe02SNick Piggin void *mem; 1631db64fe02SNick Piggin 1632db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) { 1633db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL); 1634db64fe02SNick Piggin if (IS_ERR(mem)) 1635db64fe02SNick Piggin return NULL; 1636db64fe02SNick Piggin addr = (unsigned long)mem; 1637db64fe02SNick Piggin } else { 1638db64fe02SNick Piggin struct vmap_area *va; 1639db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE, 1640db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); 1641db64fe02SNick Piggin if (IS_ERR(va)) 1642db64fe02SNick Piggin return NULL; 1643db64fe02SNick Piggin 1644db64fe02SNick Piggin addr = va->va_start; 1645db64fe02SNick Piggin mem = (void *)addr; 1646db64fe02SNick Piggin } 1647db64fe02SNick Piggin if (vmap_page_range(addr, addr + size, prot, pages) < 0) { 1648db64fe02SNick Piggin vm_unmap_ram(mem, count); 1649db64fe02SNick Piggin return NULL; 1650db64fe02SNick Piggin } 1651db64fe02SNick Piggin return mem; 1652db64fe02SNick Piggin } 1653db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram); 1654db64fe02SNick Piggin 16554341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata; 165692eac168SMike Rapoport 1657f0aa6617STejun Heo /** 1658be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot 1659be9b7335SNicolas Pitre * @vm: vm_struct to add 1660be9b7335SNicolas Pitre * 1661be9b7335SNicolas Pitre * This function is used to add fixed kernel vm area to vmlist before 1662be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 1663be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero. 1664be9b7335SNicolas Pitre * 1665be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1666be9b7335SNicolas Pitre */ 1667be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm) 1668be9b7335SNicolas Pitre { 1669be9b7335SNicolas Pitre struct vm_struct *tmp, **p; 1670be9b7335SNicolas Pitre 1671be9b7335SNicolas Pitre BUG_ON(vmap_initialized); 1672be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 1673be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) { 1674be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size); 1675be9b7335SNicolas Pitre break; 1676be9b7335SNicolas Pitre } else 1677be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr); 1678be9b7335SNicolas Pitre } 1679be9b7335SNicolas Pitre vm->next = *p; 1680be9b7335SNicolas Pitre *p = vm; 1681be9b7335SNicolas Pitre } 1682be9b7335SNicolas Pitre 1683be9b7335SNicolas Pitre /** 1684f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot 1685f0aa6617STejun Heo * @vm: vm_struct to register 1686c0c0a293STejun Heo * @align: requested alignment 1687f0aa6617STejun Heo * 1688f0aa6617STejun Heo * This function is used to register kernel vm area before 1689f0aa6617STejun Heo * vmalloc_init() is called. @vm->size and @vm->flags should contain 1690f0aa6617STejun Heo * proper values on entry and other fields should be zero. 
On return, 1691f0aa6617STejun Heo * vm->addr contains the allocated address. 1692f0aa6617STejun Heo * 1693f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1694f0aa6617STejun Heo */ 1695c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align) 1696f0aa6617STejun Heo { 1697f0aa6617STejun Heo static size_t vm_init_off __initdata; 1698c0c0a293STejun Heo unsigned long addr; 1699f0aa6617STejun Heo 1700c0c0a293STejun Heo addr = ALIGN(VMALLOC_START + vm_init_off, align); 1701c0c0a293STejun Heo vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; 1702c0c0a293STejun Heo 1703c0c0a293STejun Heo vm->addr = (void *)addr; 1704f0aa6617STejun Heo 1705be9b7335SNicolas Pitre vm_area_add_early(vm); 1706f0aa6617STejun Heo } 1707f0aa6617STejun Heo 1708*68ad4a33SUladzislau Rezki (Sony) static void vmap_init_free_space(void) 1709*68ad4a33SUladzislau Rezki (Sony) { 1710*68ad4a33SUladzislau Rezki (Sony) unsigned long vmap_start = 1; 1711*68ad4a33SUladzislau Rezki (Sony) const unsigned long vmap_end = ULONG_MAX; 1712*68ad4a33SUladzislau Rezki (Sony) struct vmap_area *busy, *free; 1713*68ad4a33SUladzislau Rezki (Sony) 1714*68ad4a33SUladzislau Rezki (Sony) /* 1715*68ad4a33SUladzislau Rezki (Sony) * B F B B B F 1716*68ad4a33SUladzislau Rezki (Sony) * -|-----|.....|-----|-----|-----|.....|- 1717*68ad4a33SUladzislau Rezki (Sony) * | The KVA space | 1718*68ad4a33SUladzislau Rezki (Sony) * |<--------------------------------->| 1719*68ad4a33SUladzislau Rezki (Sony) */ 1720*68ad4a33SUladzislau Rezki (Sony) list_for_each_entry(busy, &vmap_area_list, list) { 1721*68ad4a33SUladzislau Rezki (Sony) if (busy->va_start - vmap_start > 0) { 1722*68ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 1723*68ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 1724*68ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 1725*68ad4a33SUladzislau Rezki (Sony) free->va_end = busy->va_start; 1726*68ad4a33SUladzislau Rezki (Sony) 1727*68ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 1728*68ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 1729*68ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 1730*68ad4a33SUladzislau Rezki (Sony) } 1731*68ad4a33SUladzislau Rezki (Sony) } 1732*68ad4a33SUladzislau Rezki (Sony) 1733*68ad4a33SUladzislau Rezki (Sony) vmap_start = busy->va_end; 1734*68ad4a33SUladzislau Rezki (Sony) } 1735*68ad4a33SUladzislau Rezki (Sony) 1736*68ad4a33SUladzislau Rezki (Sony) if (vmap_end - vmap_start > 0) { 1737*68ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 1738*68ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 1739*68ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 1740*68ad4a33SUladzislau Rezki (Sony) free->va_end = vmap_end; 1741*68ad4a33SUladzislau Rezki (Sony) 1742*68ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 1743*68ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 1744*68ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 1745*68ad4a33SUladzislau Rezki (Sony) } 1746*68ad4a33SUladzislau Rezki (Sony) } 1747*68ad4a33SUladzislau Rezki (Sony) } 1748*68ad4a33SUladzislau Rezki (Sony) 1749db64fe02SNick Piggin void __init vmalloc_init(void) 1750db64fe02SNick Piggin { 1751822c18f2SIvan Kokshaysky struct vmap_area *va; 1752822c18f2SIvan Kokshaysky struct vm_struct *tmp; 1753db64fe02SNick Piggin int i; 1754db64fe02SNick Piggin 1755*68ad4a33SUladzislau Rezki (Sony) /* 1756*68ad4a33SUladzislau Rezki 
(Sony) * Create the cache for vmap_area objects. 1757*68ad4a33SUladzislau Rezki (Sony) */ 1758*68ad4a33SUladzislau Rezki (Sony) vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 1759*68ad4a33SUladzislau Rezki (Sony) 1760db64fe02SNick Piggin for_each_possible_cpu(i) { 1761db64fe02SNick Piggin struct vmap_block_queue *vbq; 176232fcfd40SAl Viro struct vfree_deferred *p; 1763db64fe02SNick Piggin 1764db64fe02SNick Piggin vbq = &per_cpu(vmap_block_queue, i); 1765db64fe02SNick Piggin spin_lock_init(&vbq->lock); 1766db64fe02SNick Piggin INIT_LIST_HEAD(&vbq->free); 176732fcfd40SAl Viro p = &per_cpu(vfree_deferred, i); 176832fcfd40SAl Viro init_llist_head(&p->list); 176932fcfd40SAl Viro INIT_WORK(&p->wq, free_work); 1770db64fe02SNick Piggin } 17719b463334SJeremy Fitzhardinge 1772822c18f2SIvan Kokshaysky /* Import existing vmlist entries. */ 1773822c18f2SIvan Kokshaysky for (tmp = vmlist; tmp; tmp = tmp->next) { 1774*68ad4a33SUladzislau Rezki (Sony) va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 1775*68ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!va)) 1776*68ad4a33SUladzislau Rezki (Sony) continue; 1777*68ad4a33SUladzislau Rezki (Sony) 1778dbda591dSKyongHo va->flags = VM_VM_AREA; 1779822c18f2SIvan Kokshaysky va->va_start = (unsigned long)tmp->addr; 1780822c18f2SIvan Kokshaysky va->va_end = va->va_start + tmp->size; 1781dbda591dSKyongHo va->vm = tmp; 1782*68ad4a33SUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 1783822c18f2SIvan Kokshaysky } 1784ca23e405STejun Heo 1785*68ad4a33SUladzislau Rezki (Sony) /* 1786*68ad4a33SUladzislau Rezki (Sony) * Now we can initialize a free vmap space. 1787*68ad4a33SUladzislau Rezki (Sony) */ 1788*68ad4a33SUladzislau Rezki (Sony) vmap_init_free_space(); 17899b463334SJeremy Fitzhardinge vmap_initialized = true; 1790db64fe02SNick Piggin } 1791db64fe02SNick Piggin 17928fc48985STejun Heo /** 17938fc48985STejun Heo * map_kernel_range_noflush - map kernel VM area with the specified pages 17948fc48985STejun Heo * @addr: start of the VM area to map 17958fc48985STejun Heo * @size: size of the VM area to map 17968fc48985STejun Heo * @prot: page protection flags to use 17978fc48985STejun Heo * @pages: pages to map 17988fc48985STejun Heo * 17998fc48985STejun Heo * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size 18008fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 18018fc48985STejun Heo * friends. 18028fc48985STejun Heo * 18038fc48985STejun Heo * NOTE: 18048fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 18058fc48985STejun Heo * responsible for calling flush_cache_vmap() on to-be-mapped areas 18068fc48985STejun Heo * before calling this function. 18078fc48985STejun Heo * 18088fc48985STejun Heo * RETURNS: 18098fc48985STejun Heo * The number of pages mapped on success, -errno on failure. 18108fc48985STejun Heo */ 18118fc48985STejun Heo int map_kernel_range_noflush(unsigned long addr, unsigned long size, 18128fc48985STejun Heo pgprot_t prot, struct page **pages) 18138fc48985STejun Heo { 18148fc48985STejun Heo return vmap_page_range_noflush(addr, addr + size, prot, pages); 18158fc48985STejun Heo } 18168fc48985STejun Heo 18178fc48985STejun Heo /** 18188fc48985STejun Heo * unmap_kernel_range_noflush - unmap kernel VM area 18198fc48985STejun Heo * @addr: start of the VM area to unmap 18208fc48985STejun Heo * @size: size of the VM area to unmap 18218fc48985STejun Heo * 18228fc48985STejun Heo * Unmap PFN_UP(@size) pages at @addr. 
The VM area @addr and @size 18238fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 18248fc48985STejun Heo * friends. 18258fc48985STejun Heo * 18268fc48985STejun Heo * NOTE: 18278fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 18288fc48985STejun Heo * responsible for calling flush_cache_vunmap() on to-be-mapped areas 18298fc48985STejun Heo * before calling this function and flush_tlb_kernel_range() after. 18308fc48985STejun Heo */ 18318fc48985STejun Heo void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) 18328fc48985STejun Heo { 18338fc48985STejun Heo vunmap_page_range(addr, addr + size); 18348fc48985STejun Heo } 183581e88fdcSHuang Ying EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush); 18368fc48985STejun Heo 18378fc48985STejun Heo /** 18388fc48985STejun Heo * unmap_kernel_range - unmap kernel VM area and flush cache and TLB 18398fc48985STejun Heo * @addr: start of the VM area to unmap 18408fc48985STejun Heo * @size: size of the VM area to unmap 18418fc48985STejun Heo * 18428fc48985STejun Heo * Similar to unmap_kernel_range_noflush() but flushes vcache before 18438fc48985STejun Heo * the unmapping and tlb after. 18448fc48985STejun Heo */ 1845db64fe02SNick Piggin void unmap_kernel_range(unsigned long addr, unsigned long size) 1846db64fe02SNick Piggin { 1847db64fe02SNick Piggin unsigned long end = addr + size; 1848f6fcba70STejun Heo 1849f6fcba70STejun Heo flush_cache_vunmap(addr, end); 1850db64fe02SNick Piggin vunmap_page_range(addr, end); 1851db64fe02SNick Piggin flush_tlb_kernel_range(addr, end); 1852db64fe02SNick Piggin } 185393ef6d6cSMinchan Kim EXPORT_SYMBOL_GPL(unmap_kernel_range); 1854db64fe02SNick Piggin 1855f6f8ed47SWANG Chao int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) 1856db64fe02SNick Piggin { 1857db64fe02SNick Piggin unsigned long addr = (unsigned long)area->addr; 1858762216abSWanpeng Li unsigned long end = addr + get_vm_area_size(area); 1859db64fe02SNick Piggin int err; 1860db64fe02SNick Piggin 1861f6f8ed47SWANG Chao err = vmap_page_range(addr, end, prot, pages); 1862db64fe02SNick Piggin 1863f6f8ed47SWANG Chao return err > 0 ? 0 : err; 1864db64fe02SNick Piggin } 1865db64fe02SNick Piggin EXPORT_SYMBOL_GPL(map_vm_area); 1866db64fe02SNick Piggin 1867f5252e00SMitsuo Hayasaka static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 18685e6cafc8SMarek Szyprowski unsigned long flags, const void *caller) 1869cf88c790STejun Heo { 1870c69480adSJoonsoo Kim spin_lock(&vmap_area_lock); 1871cf88c790STejun Heo vm->flags = flags; 1872cf88c790STejun Heo vm->addr = (void *)va->va_start; 1873cf88c790STejun Heo vm->size = va->va_end - va->va_start; 1874cf88c790STejun Heo vm->caller = caller; 1875db1aecafSMinchan Kim va->vm = vm; 1876cf88c790STejun Heo va->flags |= VM_VM_AREA; 1877c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 1878f5252e00SMitsuo Hayasaka } 1879cf88c790STejun Heo 188020fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm) 1881f5252e00SMitsuo Hayasaka { 1882d4033afdSJoonsoo Kim /* 188320fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED, 1884d4033afdSJoonsoo Kim * we should make sure that vm has proper values. 1885d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info(). 
1886d4033afdSJoonsoo Kim */ 1887d4033afdSJoonsoo Kim smp_wmb(); 188820fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED; 1889cf88c790STejun Heo } 1890cf88c790STejun Heo 1891db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size, 18922dca6999SDavid Miller unsigned long align, unsigned long flags, unsigned long start, 18935e6cafc8SMarek Szyprowski unsigned long end, int node, gfp_t gfp_mask, const void *caller) 1894db64fe02SNick Piggin { 18950006526dSKautuk Consul struct vmap_area *va; 1896db64fe02SNick Piggin struct vm_struct *area; 18971da177e4SLinus Torvalds 189852fd24caSGiridhar Pemmasani BUG_ON(in_interrupt()); 18991da177e4SLinus Torvalds size = PAGE_ALIGN(size); 190031be8309SOGAWA Hirofumi if (unlikely(!size)) 190131be8309SOGAWA Hirofumi return NULL; 19021da177e4SLinus Torvalds 1903252e5c6eSzijun_hu if (flags & VM_IOREMAP) 1904252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size), 1905252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER); 1906252e5c6eSzijun_hu 1907cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 19081da177e4SLinus Torvalds if (unlikely(!area)) 19091da177e4SLinus Torvalds return NULL; 19101da177e4SLinus Torvalds 191171394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD)) 19121da177e4SLinus Torvalds size += PAGE_SIZE; 19131da177e4SLinus Torvalds 1914db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask); 1915db64fe02SNick Piggin if (IS_ERR(va)) { 1916db64fe02SNick Piggin kfree(area); 1917db64fe02SNick Piggin return NULL; 19181da177e4SLinus Torvalds } 19191da177e4SLinus Torvalds 1920f5252e00SMitsuo Hayasaka setup_vmalloc_vm(area, va, flags, caller); 1921f5252e00SMitsuo Hayasaka 19221da177e4SLinus Torvalds return area; 19231da177e4SLinus Torvalds } 19241da177e4SLinus Torvalds 1925930fc45aSChristoph Lameter struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 1926930fc45aSChristoph Lameter unsigned long start, unsigned long end) 1927930fc45aSChristoph Lameter { 192800ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, 192900ef2d2fSDavid Rientjes GFP_KERNEL, __builtin_return_address(0)); 1930930fc45aSChristoph Lameter } 19315992b6daSRusty Russell EXPORT_SYMBOL_GPL(__get_vm_area); 1932930fc45aSChristoph Lameter 1933c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 1934c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end, 19355e6cafc8SMarek Szyprowski const void *caller) 1936c2968612SBenjamin Herrenschmidt { 193700ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, 193800ef2d2fSDavid Rientjes GFP_KERNEL, caller); 1939c2968612SBenjamin Herrenschmidt } 1940c2968612SBenjamin Herrenschmidt 19411da177e4SLinus Torvalds /** 1942183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area 19431da177e4SLinus Torvalds * @size: size of the area 19441da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 19451da177e4SLinus Torvalds * 19461da177e4SLinus Torvalds * Search an area of @size in the kernel virtual mapping area, 19471da177e4SLinus Torvalds * and reserve it for our purposes. Returns the area descriptor 19481da177e4SLinus Torvalds * on success or %NULL on failure. 1949a862f68aSMike Rapoport * 1950a862f68aSMike Rapoport * Return: the area descriptor on success or %NULL on failure.
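 *
 * A minimal sketch (illustrative only; @size and what the caller later
 * maps into the area are assumptions of this example):
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *
 * The returned area->addr is reserved kernel virtual address space that
 * is not yet backed by any mapping.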
19511da177e4SLinus Torvalds */ 19521da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 19531da177e4SLinus Torvalds { 19542dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, 195500ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, 195600ef2d2fSDavid Rientjes __builtin_return_address(0)); 195723016969SChristoph Lameter } 195823016969SChristoph Lameter 195923016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 19605e6cafc8SMarek Szyprowski const void *caller) 196123016969SChristoph Lameter { 19622dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, 196300ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, caller); 19641da177e4SLinus Torvalds } 19651da177e4SLinus Torvalds 1966e9da6e99SMarek Szyprowski /** 1967e9da6e99SMarek Szyprowski * find_vm_area - find a contiguous kernel virtual area 1968e9da6e99SMarek Szyprowski * @addr: base address 1969e9da6e99SMarek Szyprowski * 1970e9da6e99SMarek Szyprowski * Search for the kernel VM area starting at @addr, and return it. 1971e9da6e99SMarek Szyprowski * It is up to the caller to do all required locking to keep the returned 1972e9da6e99SMarek Szyprowski * pointer valid. 1973a862f68aSMike Rapoport * 1974a862f68aSMike Rapoport * Return: pointer to the found area or %NULL on failure 1975e9da6e99SMarek Szyprowski */ 1976e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr) 197783342314SNick Piggin { 1978db64fe02SNick Piggin struct vmap_area *va; 197983342314SNick Piggin 1980db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 1981db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) 1982db1aecafSMinchan Kim return va->vm; 198383342314SNick Piggin 19847856dfebSAndi Kleen return NULL; 19857856dfebSAndi Kleen } 19867856dfebSAndi Kleen 19871da177e4SLinus Torvalds /** 1988183ff22bSSimon Arlott * remove_vm_area - find and remove a contiguous kernel virtual area 19891da177e4SLinus Torvalds * @addr: base address 19901da177e4SLinus Torvalds * 19911da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it. 19921da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe 19937856dfebSAndi Kleen * on SMP machines, except for its size or flags.
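 * The mapping is torn down and the KVA is handed to the lazy purge
 * machinery; freeing the returned vm_struct itself (kfree()) is left to
 * the caller, as __vunmap() below does.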
1994a862f68aSMike Rapoport * 1995a862f68aSMike Rapoport * Return: pointer to the found area or %NULL on faulure 19961da177e4SLinus Torvalds */ 1997b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 19981da177e4SLinus Torvalds { 1999db64fe02SNick Piggin struct vmap_area *va; 2000db64fe02SNick Piggin 20015803ed29SChristoph Hellwig might_sleep(); 20025803ed29SChristoph Hellwig 2003db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 2004db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) { 2005db1aecafSMinchan Kim struct vm_struct *vm = va->vm; 2006f5252e00SMitsuo Hayasaka 2007c69480adSJoonsoo Kim spin_lock(&vmap_area_lock); 2008c69480adSJoonsoo Kim va->vm = NULL; 2009c69480adSJoonsoo Kim va->flags &= ~VM_VM_AREA; 201078c72746SYisheng Xie va->flags |= VM_LAZY_FREE; 2011c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 2012c69480adSJoonsoo Kim 2013a5af5aa8SAndrey Ryabinin kasan_free_shadow(vm); 2014dd32c279SKAMEZAWA Hiroyuki free_unmap_vmap_area(va); 2015dd32c279SKAMEZAWA Hiroyuki 2016db64fe02SNick Piggin return vm; 2017db64fe02SNick Piggin } 2018db64fe02SNick Piggin return NULL; 20191da177e4SLinus Torvalds } 20201da177e4SLinus Torvalds 2021868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area, 2022868b104dSRick Edgecombe int (*set_direct_map)(struct page *page)) 2023868b104dSRick Edgecombe { 2024868b104dSRick Edgecombe int i; 2025868b104dSRick Edgecombe 2026868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) 2027868b104dSRick Edgecombe if (page_address(area->pages[i])) 2028868b104dSRick Edgecombe set_direct_map(area->pages[i]); 2029868b104dSRick Edgecombe } 2030868b104dSRick Edgecombe 2031868b104dSRick Edgecombe /* Handle removing and resetting vm mappings related to the vm_struct. */ 2032868b104dSRick Edgecombe static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 2033868b104dSRick Edgecombe { 2034868b104dSRick Edgecombe unsigned long addr = (unsigned long)area->addr; 2035868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 2036868b104dSRick Edgecombe int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 2037868b104dSRick Edgecombe int i; 2038868b104dSRick Edgecombe 2039868b104dSRick Edgecombe /* 2040868b104dSRick Edgecombe * The below block can be removed when all architectures that have 2041868b104dSRick Edgecombe * direct map permissions also have set_direct_map_() implementations. 2042868b104dSRick Edgecombe * This is concerned with resetting the direct map any an vm alias with 2043868b104dSRick Edgecombe * execute permissions, without leaving a RW+X window. 2044868b104dSRick Edgecombe */ 2045868b104dSRick Edgecombe if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) { 2046868b104dSRick Edgecombe set_memory_nx(addr, area->nr_pages); 2047868b104dSRick Edgecombe set_memory_rw(addr, area->nr_pages); 2048868b104dSRick Edgecombe } 2049868b104dSRick Edgecombe 2050868b104dSRick Edgecombe remove_vm_area(area->addr); 2051868b104dSRick Edgecombe 2052868b104dSRick Edgecombe /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */ 2053868b104dSRick Edgecombe if (!flush_reset) 2054868b104dSRick Edgecombe return; 2055868b104dSRick Edgecombe 2056868b104dSRick Edgecombe /* 2057868b104dSRick Edgecombe * If not deallocating pages, just do the flush of the VM area and 2058868b104dSRick Edgecombe * return. 
2059868b104dSRick Edgecombe */ 2060868b104dSRick Edgecombe if (!deallocate_pages) { 2061868b104dSRick Edgecombe vm_unmap_aliases(); 2062868b104dSRick Edgecombe return; 2063868b104dSRick Edgecombe } 2064868b104dSRick Edgecombe 2065868b104dSRick Edgecombe /* 2066868b104dSRick Edgecombe * If execution gets here, flush the vm mapping and reset the direct 2067868b104dSRick Edgecombe * map. Find the start and end range of the direct mappings to make sure 2068868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map. 2069868b104dSRick Edgecombe */ 2070868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) { 2071868b104dSRick Edgecombe if (page_address(area->pages[i])) { 2072868b104dSRick Edgecombe start = min(addr, start); 2073868b104dSRick Edgecombe end = max(addr, end); 2074868b104dSRick Edgecombe } 2075868b104dSRick Edgecombe } 2076868b104dSRick Edgecombe 2077868b104dSRick Edgecombe /* 2078868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if 2079868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and 2080868b104dSRick Edgecombe * reset the direct map permissions to the default. 2081868b104dSRick Edgecombe */ 2082868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush); 2083868b104dSRick Edgecombe _vm_unmap_aliases(start, end, 1); 2084868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush); 2085868b104dSRick Edgecombe } 2086868b104dSRick Edgecombe 2087b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 20881da177e4SLinus Torvalds { 20891da177e4SLinus Torvalds struct vm_struct *area; 20901da177e4SLinus Torvalds 20911da177e4SLinus Torvalds if (!addr) 20921da177e4SLinus Torvalds return; 20931da177e4SLinus Torvalds 2094e69e9d4aSHATAYAMA Daisuke if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 2095ab15d9b4SDan Carpenter addr)) 20961da177e4SLinus Torvalds return; 20971da177e4SLinus Torvalds 20986ade2032SLiviu Dudau area = find_vm_area(addr); 20991da177e4SLinus Torvalds if (unlikely(!area)) { 21004c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 21011da177e4SLinus Torvalds addr); 21021da177e4SLinus Torvalds return; 21031da177e4SLinus Torvalds } 21041da177e4SLinus Torvalds 210505e3ff95SChintan Pandya debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); 210605e3ff95SChintan Pandya debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); 21079a11b49aSIngo Molnar 2108868b104dSRick Edgecombe vm_remove_mappings(area, deallocate_pages); 2109868b104dSRick Edgecombe 21101da177e4SLinus Torvalds if (deallocate_pages) { 21111da177e4SLinus Torvalds int i; 21121da177e4SLinus Torvalds 21131da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 2114bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 2115bf53d6f8SChristoph Lameter 2116bf53d6f8SChristoph Lameter BUG_ON(!page); 21174949148aSVladimir Davydov __free_pages(page, 0); 21181da177e4SLinus Torvalds } 21191da177e4SLinus Torvalds 2120244d63eeSDavid Rientjes kvfree(area->pages); 21211da177e4SLinus Torvalds } 21221da177e4SLinus Torvalds 21231da177e4SLinus Torvalds kfree(area); 21241da177e4SLinus Torvalds return; 21251da177e4SLinus Torvalds } 21261da177e4SLinus Torvalds 2127bf22e37aSAndrey Ryabinin static inline void __vfree_deferred(const void *addr) 2128bf22e37aSAndrey Ryabinin { 2129bf22e37aSAndrey Ryabinin /* 2130bf22e37aSAndrey Ryabinin * Use raw_cpu_ptr() because this can be 
called from preemptible
2131bf22e37aSAndrey Ryabinin * context. Preemption is absolutely fine here, because the llist_add()
2132bf22e37aSAndrey Ryabinin * implementation is lockless, so it works even if we are adding to
2133bf22e37aSAndrey Ryabinin * another cpu's list. schedule_work() should be fine with this too.
2134bf22e37aSAndrey Ryabinin */
2135bf22e37aSAndrey Ryabinin struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2136bf22e37aSAndrey Ryabinin
2137bf22e37aSAndrey Ryabinin if (llist_add((struct llist_node *)addr, &p->list))
2138bf22e37aSAndrey Ryabinin schedule_work(&p->wq);
2139bf22e37aSAndrey Ryabinin }
2140bf22e37aSAndrey Ryabinin
2141bf22e37aSAndrey Ryabinin /**
2142bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc()
2143bf22e37aSAndrey Ryabinin * @addr: memory base address
2144bf22e37aSAndrey Ryabinin *
2145bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context
2146bf22e37aSAndrey Ryabinin * except NMIs.
2147bf22e37aSAndrey Ryabinin */
2148bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr)
2149bf22e37aSAndrey Ryabinin {
2150bf22e37aSAndrey Ryabinin BUG_ON(in_nmi());
2151bf22e37aSAndrey Ryabinin
2152bf22e37aSAndrey Ryabinin kmemleak_free(addr);
2153bf22e37aSAndrey Ryabinin
2154bf22e37aSAndrey Ryabinin if (!addr)
2155bf22e37aSAndrey Ryabinin return;
2156bf22e37aSAndrey Ryabinin __vfree_deferred(addr);
2157bf22e37aSAndrey Ryabinin }
2158bf22e37aSAndrey Ryabinin
2159c67dc624SRoman Penyaev static void __vfree(const void *addr)
2160c67dc624SRoman Penyaev {
2161c67dc624SRoman Penyaev if (unlikely(in_interrupt()))
2162c67dc624SRoman Penyaev __vfree_deferred(addr);
2163c67dc624SRoman Penyaev else
2164c67dc624SRoman Penyaev __vunmap(addr, 1);
2165c67dc624SRoman Penyaev }
2166c67dc624SRoman Penyaev
21671da177e4SLinus Torvalds /**
21681da177e4SLinus Torvalds * vfree - release memory allocated by vmalloc()
21691da177e4SLinus Torvalds * @addr: memory base address
21701da177e4SLinus Torvalds *
2171183ff22bSSimon Arlott * Free the virtually contiguous memory area starting at @addr, as
217280e93effSPekka Enberg * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
217380e93effSPekka Enberg * NULL, no operation is performed.
21741da177e4SLinus Torvalds *
217532fcfd40SAl Viro * Must not be called in NMI context (strictly speaking, only if we don't
217632fcfd40SAl Viro * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
217732fcfd40SAl Viro * conventions for vfree() arch-dependent would be a really bad idea)
217832fcfd40SAl Viro *
21793ca4ea3aSAndrey Ryabinin * May sleep if called *not* from interrupt context.
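 *
 * Example (an illustrative sketch, not part of the original source): a
 * typical caller pairs vmalloc() with vfree() from process context:
 *
 *	void *buf = vmalloc(64 * 1024);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... use buf ...
 *	vfree(buf);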
21803ca4ea3aSAndrey Ryabinin * 21810e056eb5Smchehab@s-opensource.com * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node) 21821da177e4SLinus Torvalds */ 2183b3bdda02SChristoph Lameter void vfree(const void *addr) 21841da177e4SLinus Torvalds { 218532fcfd40SAl Viro BUG_ON(in_nmi()); 218689219d37SCatalin Marinas 218789219d37SCatalin Marinas kmemleak_free(addr); 218889219d37SCatalin Marinas 2189a8dda165SAndrey Ryabinin might_sleep_if(!in_interrupt()); 2190a8dda165SAndrey Ryabinin 219132fcfd40SAl Viro if (!addr) 219232fcfd40SAl Viro return; 2193c67dc624SRoman Penyaev 2194c67dc624SRoman Penyaev __vfree(addr); 21951da177e4SLinus Torvalds } 21961da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 21971da177e4SLinus Torvalds 21981da177e4SLinus Torvalds /** 21991da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 22001da177e4SLinus Torvalds * @addr: memory base address 22011da177e4SLinus Torvalds * 22021da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 22031da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 22041da177e4SLinus Torvalds * 220580e93effSPekka Enberg * Must not be called in interrupt context. 22061da177e4SLinus Torvalds */ 2207b3bdda02SChristoph Lameter void vunmap(const void *addr) 22081da177e4SLinus Torvalds { 22091da177e4SLinus Torvalds BUG_ON(in_interrupt()); 221034754b69SPeter Zijlstra might_sleep(); 221132fcfd40SAl Viro if (addr) 22121da177e4SLinus Torvalds __vunmap(addr, 0); 22131da177e4SLinus Torvalds } 22141da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 22151da177e4SLinus Torvalds 22161da177e4SLinus Torvalds /** 22171da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 22181da177e4SLinus Torvalds * @pages: array of page pointers 22191da177e4SLinus Torvalds * @count: number of pages to map 22201da177e4SLinus Torvalds * @flags: vm_area->flags 22211da177e4SLinus Torvalds * @prot: page protection for the mapping 22221da177e4SLinus Torvalds * 22231da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 22241da177e4SLinus Torvalds * space. 
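 *
 * Example (an illustrative sketch, not part of the original source;
 * page0 and page1 stand for pages obtained elsewhere, e.g. from
 * alloc_page(), and are not symbols defined in this file):
 *
 *	struct page *pages[2] = { page0, page1 };
 *	void *va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	... access both pages contiguously through va ...
 *	vunmap(va);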
2225a862f68aSMike Rapoport * 2226a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 22271da177e4SLinus Torvalds */ 22281da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 22291da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 22301da177e4SLinus Torvalds { 22311da177e4SLinus Torvalds struct vm_struct *area; 223265ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */ 22331da177e4SLinus Torvalds 223434754b69SPeter Zijlstra might_sleep(); 223534754b69SPeter Zijlstra 2236ca79b0c2SArun KS if (count > totalram_pages()) 22371da177e4SLinus Torvalds return NULL; 22381da177e4SLinus Torvalds 223965ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT; 224065ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 22411da177e4SLinus Torvalds if (!area) 22421da177e4SLinus Torvalds return NULL; 224323016969SChristoph Lameter 2244f6f8ed47SWANG Chao if (map_vm_area(area, prot, pages)) { 22451da177e4SLinus Torvalds vunmap(area->addr); 22461da177e4SLinus Torvalds return NULL; 22471da177e4SLinus Torvalds } 22481da177e4SLinus Torvalds 22491da177e4SLinus Torvalds return area->addr; 22501da177e4SLinus Torvalds } 22511da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 22521da177e4SLinus Torvalds 22538594a21cSMichal Hocko static void *__vmalloc_node(unsigned long size, unsigned long align, 22548594a21cSMichal Hocko gfp_t gfp_mask, pgprot_t prot, 22558594a21cSMichal Hocko int node, const void *caller); 2256e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 22573722e13cSWanpeng Li pgprot_t prot, int node) 22581da177e4SLinus Torvalds { 22591da177e4SLinus Torvalds struct page **pages; 22601da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 2261930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 2262704b862fSLaura Abbott const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; 2263704b862fSLaura Abbott const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ? 2264704b862fSLaura Abbott 0 : 2265704b862fSLaura Abbott __GFP_HIGHMEM; 22661da177e4SLinus Torvalds 2267762216abSWanpeng Li nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; 22681da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 22691da177e4SLinus Torvalds 22701da177e4SLinus Torvalds area->nr_pages = nr_pages; 22711da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
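	   Each nested __vmalloc_node() call only has to provide the page
	   array for the level above it, which shrinks by a factor of about
	   PAGE_SIZE / sizeof(struct page *) per level; once an array fits
	   into a single page it is taken from kmalloc_node() instead, so
	   the recursion depth stays very small.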
*/ 22728757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 2273704b862fSLaura Abbott pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask, 22743722e13cSWanpeng Li PAGE_KERNEL, node, area->caller); 2275286e1ea3SAndrew Morton } else { 2276976d6dfbSJan Beulich pages = kmalloc_node(array_size, nested_gfp, node); 2277286e1ea3SAndrew Morton } 22781da177e4SLinus Torvalds area->pages = pages; 22791da177e4SLinus Torvalds if (!area->pages) { 22801da177e4SLinus Torvalds remove_vm_area(area->addr); 22811da177e4SLinus Torvalds kfree(area); 22821da177e4SLinus Torvalds return NULL; 22831da177e4SLinus Torvalds } 22841da177e4SLinus Torvalds 22851da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 2286bf53d6f8SChristoph Lameter struct page *page; 2287bf53d6f8SChristoph Lameter 22884b90951cSJianguo Wu if (node == NUMA_NO_NODE) 2289704b862fSLaura Abbott page = alloc_page(alloc_mask|highmem_mask); 2290930fc45aSChristoph Lameter else 2291704b862fSLaura Abbott page = alloc_pages_node(node, alloc_mask|highmem_mask, 0); 2292bf53d6f8SChristoph Lameter 2293bf53d6f8SChristoph Lameter if (unlikely(!page)) { 22941da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 22951da177e4SLinus Torvalds area->nr_pages = i; 22961da177e4SLinus Torvalds goto fail; 22971da177e4SLinus Torvalds } 2298bf53d6f8SChristoph Lameter area->pages[i] = page; 2299704b862fSLaura Abbott if (gfpflags_allow_blocking(gfp_mask|highmem_mask)) 2300660654f9SEric Dumazet cond_resched(); 23011da177e4SLinus Torvalds } 23021da177e4SLinus Torvalds 2303f6f8ed47SWANG Chao if (map_vm_area(area, prot, pages)) 23041da177e4SLinus Torvalds goto fail; 23051da177e4SLinus Torvalds return area->addr; 23061da177e4SLinus Torvalds 23071da177e4SLinus Torvalds fail: 2308a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 23097877cdccSMichal Hocko "vmalloc: allocation failure, allocated %ld of %ld bytes", 231022943ab1SDave Hansen (area->nr_pages*PAGE_SIZE), area->size); 2311c67dc624SRoman Penyaev __vfree(area->addr); 23121da177e4SLinus Torvalds return NULL; 23131da177e4SLinus Torvalds } 23141da177e4SLinus Torvalds 2315d0a21265SDavid Rientjes /** 2316d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 2317d0a21265SDavid Rientjes * @size: allocation size 2318d0a21265SDavid Rientjes * @align: desired alignment 2319d0a21265SDavid Rientjes * @start: vm area range start 2320d0a21265SDavid Rientjes * @end: vm area range end 2321d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 2322d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 2323cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 232400ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 2325d0a21265SDavid Rientjes * @caller: caller's return address 2326d0a21265SDavid Rientjes * 2327d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 2328d0a21265SDavid Rientjes * allocator with @gfp_mask flags. Map them into contiguous 2329d0a21265SDavid Rientjes * kernel virtual space, using a pagetable protection of @prot. 
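 *
 * Example (an illustrative sketch, not part of the original source;
 * MODULES_VADDR/MODULES_END only exist on architectures that reserve a
 * dedicated module area): an allocation restricted to that range could
 * look like:
 *
 *	void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
 *				       GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
 *				       NUMA_NO_NODE,
 *				       __builtin_return_address(0));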
2330a862f68aSMike Rapoport * 2331a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 2332d0a21265SDavid Rientjes */ 2333d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 2334d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 2335cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node, 2336cb9e3c29SAndrey Ryabinin const void *caller) 2337930fc45aSChristoph Lameter { 2338d0a21265SDavid Rientjes struct vm_struct *area; 2339d0a21265SDavid Rientjes void *addr; 2340d0a21265SDavid Rientjes unsigned long real_size = size; 2341d0a21265SDavid Rientjes 2342d0a21265SDavid Rientjes size = PAGE_ALIGN(size); 2343ca79b0c2SArun KS if (!size || (size >> PAGE_SHIFT) > totalram_pages()) 2344de7d2b56SJoe Perches goto fail; 2345d0a21265SDavid Rientjes 2346cb9e3c29SAndrey Ryabinin area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | 2347cb9e3c29SAndrey Ryabinin vm_flags, start, end, node, gfp_mask, caller); 2348d0a21265SDavid Rientjes if (!area) 2349de7d2b56SJoe Perches goto fail; 2350d0a21265SDavid Rientjes 23513722e13cSWanpeng Li addr = __vmalloc_area_node(area, gfp_mask, prot, node); 23521368edf0SMel Gorman if (!addr) 2353b82225f3SWanpeng Li return NULL; 235489219d37SCatalin Marinas 235589219d37SCatalin Marinas /* 235620fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED 235720fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized. 23584341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here. 2359f5252e00SMitsuo Hayasaka */ 236020fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area); 2361f5252e00SMitsuo Hayasaka 236294f4a161SCatalin Marinas kmemleak_vmalloc(area, size, gfp_mask); 236389219d37SCatalin Marinas 236489219d37SCatalin Marinas return addr; 2365de7d2b56SJoe Perches 2366de7d2b56SJoe Perches fail: 2367a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 23687877cdccSMichal Hocko "vmalloc: allocation failure: %lu bytes", real_size); 2369de7d2b56SJoe Perches return NULL; 2370930fc45aSChristoph Lameter } 2371930fc45aSChristoph Lameter 2372153178edSUladzislau Rezki (Sony) /* 2373153178edSUladzislau Rezki (Sony) * This is only for performance analysis of vmalloc and stress purpose. 2374153178edSUladzislau Rezki (Sony) * It is required by vmalloc test module, therefore do not use it other 2375153178edSUladzislau Rezki (Sony) * than that. 2376153178edSUladzislau Rezki (Sony) */ 2377153178edSUladzislau Rezki (Sony) #ifdef CONFIG_TEST_VMALLOC_MODULE 2378153178edSUladzislau Rezki (Sony) EXPORT_SYMBOL_GPL(__vmalloc_node_range); 2379153178edSUladzislau Rezki (Sony) #endif 2380153178edSUladzislau Rezki (Sony) 23811da177e4SLinus Torvalds /** 2382930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 23831da177e4SLinus Torvalds * @size: allocation size 23842dca6999SDavid Miller * @align: desired alignment 23851da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 23861da177e4SLinus Torvalds * @prot: protection mask for the allocated pages 238700ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 2388c85d194bSRandy Dunlap * @caller: caller's return address 23891da177e4SLinus Torvalds * 23901da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 23911da177e4SLinus Torvalds * allocator with @gfp_mask flags. Map them into contiguous 23921da177e4SLinus Torvalds * kernel virtual space, using a pagetable protection of @prot. 
2393a7c3e901SMichal Hocko * 2394dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 2395a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported 2396a7c3e901SMichal Hocko * 2397a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be consulted 2398a7c3e901SMichal Hocko * with mm people. 2399a862f68aSMike Rapoport * 2400a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 24011da177e4SLinus Torvalds */ 24028594a21cSMichal Hocko static void *__vmalloc_node(unsigned long size, unsigned long align, 24032dca6999SDavid Miller gfp_t gfp_mask, pgprot_t prot, 24045e6cafc8SMarek Szyprowski int node, const void *caller) 24051da177e4SLinus Torvalds { 2406d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 2407cb9e3c29SAndrey Ryabinin gfp_mask, prot, 0, node, caller); 24081da177e4SLinus Torvalds } 24091da177e4SLinus Torvalds 2410930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 2411930fc45aSChristoph Lameter { 241200ef2d2fSDavid Rientjes return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, 241323016969SChristoph Lameter __builtin_return_address(0)); 2414930fc45aSChristoph Lameter } 24151da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 24161da177e4SLinus Torvalds 24178594a21cSMichal Hocko static inline void *__vmalloc_node_flags(unsigned long size, 24188594a21cSMichal Hocko int node, gfp_t flags) 24198594a21cSMichal Hocko { 24208594a21cSMichal Hocko return __vmalloc_node(size, 1, flags, PAGE_KERNEL, 24218594a21cSMichal Hocko node, __builtin_return_address(0)); 24228594a21cSMichal Hocko } 24238594a21cSMichal Hocko 24248594a21cSMichal Hocko 24258594a21cSMichal Hocko void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, 24268594a21cSMichal Hocko void *caller) 24278594a21cSMichal Hocko { 24288594a21cSMichal Hocko return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller); 24298594a21cSMichal Hocko } 24308594a21cSMichal Hocko 24311da177e4SLinus Torvalds /** 24321da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 24331da177e4SLinus Torvalds * @size: allocation size 243492eac168SMike Rapoport * 24351da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 24361da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 24371da177e4SLinus Torvalds * 2438c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 24391da177e4SLinus Torvalds * use __vmalloc() instead. 2440a862f68aSMike Rapoport * 2441a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 24421da177e4SLinus Torvalds */ 24431da177e4SLinus Torvalds void *vmalloc(unsigned long size) 24441da177e4SLinus Torvalds { 244500ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 244619809c2dSMichal Hocko GFP_KERNEL); 24471da177e4SLinus Torvalds } 24481da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 24491da177e4SLinus Torvalds 2450930fc45aSChristoph Lameter /** 2451e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 2452e1ca7788SDave Young * @size: allocation size 245392eac168SMike Rapoport * 2454e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 2455e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 2456e1ca7788SDave Young * The memory allocated is set to zero. 
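 *
 * Example (an illustrative sketch, not part of the original source;
 * struct foo and nents are placeholders for a caller-defined type and
 * element count):
 *
 *	struct foo *table = vzalloc(nents * sizeof(*table));
 *
 *	if (!table)
 *		return -ENOMEM;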
2457e1ca7788SDave Young * 2458e1ca7788SDave Young * For tight control over page level allocator and protection flags 2459e1ca7788SDave Young * use __vmalloc() instead. 2460a862f68aSMike Rapoport * 2461a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2462e1ca7788SDave Young */ 2463e1ca7788SDave Young void *vzalloc(unsigned long size) 2464e1ca7788SDave Young { 246500ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 246619809c2dSMichal Hocko GFP_KERNEL | __GFP_ZERO); 2467e1ca7788SDave Young } 2468e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 2469e1ca7788SDave Young 2470e1ca7788SDave Young /** 2471ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 247283342314SNick Piggin * @size: allocation size 2473ead04089SRolf Eike Beer * 2474ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 2475ead04089SRolf Eike Beer * without leaking data. 2476a862f68aSMike Rapoport * 2477a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 247883342314SNick Piggin */ 247983342314SNick Piggin void *vmalloc_user(unsigned long size) 248083342314SNick Piggin { 2481bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 2482bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 2483bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 248400ef2d2fSDavid Rientjes __builtin_return_address(0)); 248583342314SNick Piggin } 248683342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 248783342314SNick Piggin 248883342314SNick Piggin /** 2489930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 2490930fc45aSChristoph Lameter * @size: allocation size 2491d44e0780SRandy Dunlap * @node: numa node 2492930fc45aSChristoph Lameter * 2493930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 2494930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 2495930fc45aSChristoph Lameter * 2496c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 2497930fc45aSChristoph Lameter * use __vmalloc() instead. 2498a862f68aSMike Rapoport * 2499a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2500930fc45aSChristoph Lameter */ 2501930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 2502930fc45aSChristoph Lameter { 250319809c2dSMichal Hocko return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL, 250423016969SChristoph Lameter node, __builtin_return_address(0)); 2505930fc45aSChristoph Lameter } 2506930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 2507930fc45aSChristoph Lameter 2508e1ca7788SDave Young /** 2509e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 2510e1ca7788SDave Young * @size: allocation size 2511e1ca7788SDave Young * @node: numa node 2512e1ca7788SDave Young * 2513e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 2514e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 2515e1ca7788SDave Young * The memory allocated is set to zero. 2516e1ca7788SDave Young * 2517e1ca7788SDave Young * For tight control over page level allocator and protection flags 2518e1ca7788SDave Young * use __vmalloc_node() instead. 
2519a862f68aSMike Rapoport *
2520a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
2521e1ca7788SDave Young */
2522e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node)
2523e1ca7788SDave Young {
2524e1ca7788SDave Young return __vmalloc_node_flags(size, node,
252519809c2dSMichal Hocko GFP_KERNEL | __GFP_ZERO);
2526e1ca7788SDave Young }
2527e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node);
2528e1ca7788SDave Young
25291da177e4SLinus Torvalds /**
25301da177e4SLinus Torvalds * vmalloc_exec - allocate virtually contiguous, executable memory
25311da177e4SLinus Torvalds * @size: allocation size
25321da177e4SLinus Torvalds *
25331da177e4SLinus Torvalds * Kernel-internal function to allocate enough pages to cover @size from
25341da177e4SLinus Torvalds * the page level allocator and map them into contiguous and
25351da177e4SLinus Torvalds * executable kernel virtual space.
25361da177e4SLinus Torvalds *
2537c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags
25381da177e4SLinus Torvalds * use __vmalloc() instead.
2539a862f68aSMike Rapoport *
2540a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
25411da177e4SLinus Torvalds */
25421da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size)
25431da177e4SLinus Torvalds {
2544868b104dSRick Edgecombe return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2545868b104dSRick Edgecombe GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
254600ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0));
25471da177e4SLinus Torvalds }
25481da177e4SLinus Torvalds
25490d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2550698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
25510d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2552698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
25530d08e0d3SAndi Kleen #else
2554698d0831SMichal Hocko /*
2555698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others
2556698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone.
2557698d0831SMichal Hocko */
2558698d0831SMichal Hocko #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
25590d08e0d3SAndi Kleen #endif
25600d08e0d3SAndi Kleen
25611da177e4SLinus Torvalds /**
25621da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
25631da177e4SLinus Torvalds * @size: allocation size
25641da177e4SLinus Torvalds *
25651da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the
25661da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space.
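 *
 * Example (an illustrative sketch, not part of the original source): a
 * driver that must keep a large buffer below the 4GB physical boundary
 * could use:
 *
 *	void *buf = vmalloc_32(1024 * 1024);
 *
 * Note that the buffer is only virtually contiguous; for DMA each page
 * still has to be mapped individually (or through a scatterlist).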
2567a862f68aSMike Rapoport *
2568a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
25691da177e4SLinus Torvalds */
25701da177e4SLinus Torvalds void *vmalloc_32(unsigned long size)
25711da177e4SLinus Torvalds {
25722dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
257300ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0));
25741da177e4SLinus Torvalds }
25751da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32);
25761da177e4SLinus Torvalds
257783342314SNick Piggin /**
2578ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
257983342314SNick Piggin * @size: allocation size
2580ead04089SRolf Eike Beer *
2581ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be
2582ead04089SRolf Eike Beer * mapped to userspace without leaking data.
2583a862f68aSMike Rapoport *
2584a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error
258583342314SNick Piggin */
258683342314SNick Piggin void *vmalloc_32_user(unsigned long size)
258783342314SNick Piggin {
2588bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2589bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2590bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE,
25915a82ac71SRoman Penyaev __builtin_return_address(0));
259283342314SNick Piggin }
259383342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user);
259483342314SNick Piggin
2595d0107eb0SKAMEZAWA Hiroyuki /*
2596d0107eb0SKAMEZAWA Hiroyuki * small helper routine, copy contents to buf from addr.
2597d0107eb0SKAMEZAWA Hiroyuki * If the page is not present, fill zero.
2598d0107eb0SKAMEZAWA Hiroyuki */
2599d0107eb0SKAMEZAWA Hiroyuki
2600d0107eb0SKAMEZAWA Hiroyuki static int aligned_vread(char *buf, char *addr, unsigned long count)
2601d0107eb0SKAMEZAWA Hiroyuki {
2602d0107eb0SKAMEZAWA Hiroyuki struct page *p;
2603d0107eb0SKAMEZAWA Hiroyuki int copied = 0;
2604d0107eb0SKAMEZAWA Hiroyuki
2605d0107eb0SKAMEZAWA Hiroyuki while (count) {
2606d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length;
2607d0107eb0SKAMEZAWA Hiroyuki
2608891c49abSAlexander Kuleshov offset = offset_in_page(addr);
2609d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset;
2610d0107eb0SKAMEZAWA Hiroyuki if (length > count)
2611d0107eb0SKAMEZAWA Hiroyuki length = count;
2612d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr);
2613d0107eb0SKAMEZAWA Hiroyuki /*
2614d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need a
2615d0107eb0SKAMEZAWA Hiroyuki * lock. But adding a lock here means that we need to add
2616d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calls for this _debug_
2617d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use
2618d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function.
2619d0107eb0SKAMEZAWA Hiroyuki */
2620d0107eb0SKAMEZAWA Hiroyuki if (p) {
2621d0107eb0SKAMEZAWA Hiroyuki /*
2622d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's
2623d0107eb0SKAMEZAWA Hiroyuki * function description)
2624d0107eb0SKAMEZAWA Hiroyuki */
26259b04c5feSCong Wang void *map = kmap_atomic(p);
2626d0107eb0SKAMEZAWA Hiroyuki memcpy(buf, map + offset, length);
26279b04c5feSCong Wang kunmap_atomic(map);
2628d0107eb0SKAMEZAWA Hiroyuki } else
2629d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, length);
2630d0107eb0SKAMEZAWA Hiroyuki
2631d0107eb0SKAMEZAWA Hiroyuki addr += length;
2632d0107eb0SKAMEZAWA Hiroyuki buf += length;
2633d0107eb0SKAMEZAWA Hiroyuki copied += length;
2634d0107eb0SKAMEZAWA Hiroyuki count -= length;
2635d0107eb0SKAMEZAWA Hiroyuki }
2636d0107eb0SKAMEZAWA Hiroyuki return copied;
2637d0107eb0SKAMEZAWA Hiroyuki }
2638d0107eb0SKAMEZAWA Hiroyuki
2639d0107eb0SKAMEZAWA Hiroyuki static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2640d0107eb0SKAMEZAWA Hiroyuki {
2641d0107eb0SKAMEZAWA Hiroyuki struct page *p;
2642d0107eb0SKAMEZAWA Hiroyuki int copied = 0;
2643d0107eb0SKAMEZAWA Hiroyuki
2644d0107eb0SKAMEZAWA Hiroyuki while (count) {
2645d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length;
2646d0107eb0SKAMEZAWA Hiroyuki
2647891c49abSAlexander Kuleshov offset = offset_in_page(addr);
2648d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset;
2649d0107eb0SKAMEZAWA Hiroyuki if (length > count)
2650d0107eb0SKAMEZAWA Hiroyuki length = count;
2651d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr);
2652d0107eb0SKAMEZAWA Hiroyuki /*
2653d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need a
2654d0107eb0SKAMEZAWA Hiroyuki * lock. But adding a lock here means that we need to add
2655d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calls for this _debug_
2656d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use
2657d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function.
2658d0107eb0SKAMEZAWA Hiroyuki */
2659d0107eb0SKAMEZAWA Hiroyuki if (p) {
2660d0107eb0SKAMEZAWA Hiroyuki /*
2661d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's
2662d0107eb0SKAMEZAWA Hiroyuki * function description)
2663d0107eb0SKAMEZAWA Hiroyuki */
26649b04c5feSCong Wang void *map = kmap_atomic(p);
2665d0107eb0SKAMEZAWA Hiroyuki memcpy(map + offset, buf, length);
26669b04c5feSCong Wang kunmap_atomic(map);
2667d0107eb0SKAMEZAWA Hiroyuki }
2668d0107eb0SKAMEZAWA Hiroyuki addr += length;
2669d0107eb0SKAMEZAWA Hiroyuki buf += length;
2670d0107eb0SKAMEZAWA Hiroyuki copied += length;
2671d0107eb0SKAMEZAWA Hiroyuki count -= length;
2672d0107eb0SKAMEZAWA Hiroyuki }
2673d0107eb0SKAMEZAWA Hiroyuki return copied;
2674d0107eb0SKAMEZAWA Hiroyuki }
2675d0107eb0SKAMEZAWA Hiroyuki
2676d0107eb0SKAMEZAWA Hiroyuki /**
2677d0107eb0SKAMEZAWA Hiroyuki * vread() - read vmalloc area in a safe way.
2678d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for reading data
2679d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address.
2680d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read.
2681d0107eb0SKAMEZAWA Hiroyuki *
2682d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and
2683d0107eb0SKAMEZAWA Hiroyuki * copies data from that area to a given buffer. If the given memory range
2684d0107eb0SKAMEZAWA Hiroyuki * of [addr...addr+count) includes some valid address, data is copied to
2685d0107eb0SKAMEZAWA Hiroyuki * the proper area of @buf.
If there are memory holes, they'll be zero-filled.
2686d0107eb0SKAMEZAWA Hiroyuki * IOREMAP area is treated as memory hole and no copy is done.
2687d0107eb0SKAMEZAWA Hiroyuki *
2688d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with an alive
2689a8e5202dSCong Wang * vm_struct area, returns 0. @buf should be kernel's buffer.
2690d0107eb0SKAMEZAWA Hiroyuki *
2691d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vread() is never necessary because the caller
2692d0107eb0SKAMEZAWA Hiroyuki * should know vmalloc() area is valid and can use memcpy().
2693d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access vmalloc area without
2694d0107eb0SKAMEZAWA Hiroyuki * any information, such as /dev/kmem.
2695a862f68aSMike Rapoport *
2696a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be increased
2697a862f68aSMike Rapoport * (same number as @count) or %0 if [addr...addr+count) doesn't
2698a862f68aSMike Rapoport * include any intersection with valid vmalloc area
2699d0107eb0SKAMEZAWA Hiroyuki */
27001da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count)
27011da177e4SLinus Torvalds {
2702e81ce85fSJoonsoo Kim struct vmap_area *va;
2703e81ce85fSJoonsoo Kim struct vm_struct *vm;
27041da177e4SLinus Torvalds char *vaddr, *buf_start = buf;
2705d0107eb0SKAMEZAWA Hiroyuki unsigned long buflen = count;
27061da177e4SLinus Torvalds unsigned long n;
27071da177e4SLinus Torvalds
27081da177e4SLinus Torvalds /* Don't allow overflow */
27091da177e4SLinus Torvalds if ((unsigned long) addr + count < count)
27101da177e4SLinus Torvalds count = -(unsigned long) addr;
27111da177e4SLinus Torvalds
2712e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock);
2713e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) {
2714e81ce85fSJoonsoo Kim if (!count)
2715e81ce85fSJoonsoo Kim break;
2716e81ce85fSJoonsoo Kim
2717e81ce85fSJoonsoo Kim if (!(va->flags & VM_VM_AREA))
2718e81ce85fSJoonsoo Kim continue;
2719e81ce85fSJoonsoo Kim
2720e81ce85fSJoonsoo Kim vm = va->vm;
2721e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr;
2722762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm))
27231da177e4SLinus Torvalds continue;
27241da177e4SLinus Torvalds while (addr < vaddr) {
27251da177e4SLinus Torvalds if (count == 0)
27261da177e4SLinus Torvalds goto finished;
27271da177e4SLinus Torvalds *buf = '\0';
27281da177e4SLinus Torvalds buf++;
27291da177e4SLinus Torvalds addr++;
27301da177e4SLinus Torvalds count--;
27311da177e4SLinus Torvalds }
2732762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr;
2733d0107eb0SKAMEZAWA Hiroyuki if (n > count)
2734d0107eb0SKAMEZAWA Hiroyuki n = count;
2735e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP))
2736d0107eb0SKAMEZAWA Hiroyuki aligned_vread(buf, addr, n);
2737d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */
2738d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, n);
2739d0107eb0SKAMEZAWA Hiroyuki buf += n;
2740d0107eb0SKAMEZAWA Hiroyuki addr += n;
2741d0107eb0SKAMEZAWA Hiroyuki count -= n;
27421da177e4SLinus Torvalds }
27431da177e4SLinus Torvalds finished:
2744e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock);
2745d0107eb0SKAMEZAWA Hiroyuki
2746d0107eb0SKAMEZAWA Hiroyuki if (buf == buf_start)
2747d0107eb0SKAMEZAWA Hiroyuki return 0;
2748d0107eb0SKAMEZAWA Hiroyuki /* zero-fill memory holes */
2749d0107eb0SKAMEZAWA Hiroyuki if (buf != buf_start + buflen)
2750d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, buflen - (buf - buf_start));
2751d0107eb0SKAMEZAWA Hiroyuki
2752d0107eb0SKAMEZAWA Hiroyuki return buflen;
27531da177e4SLinus Torvalds }
27541da177e4SLinus Torvalds
2755d0107eb0SKAMEZAWA Hiroyuki /**
2756d0107eb0SKAMEZAWA Hiroyuki * vwrite() - write vmalloc area in a safe way.
2757d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for source data
2758d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address.
2759d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be written.
2760d0107eb0SKAMEZAWA Hiroyuki *
2761d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and
2762d0107eb0SKAMEZAWA Hiroyuki * copies data from a buffer to the given addr. If the specified range of
2763d0107eb0SKAMEZAWA Hiroyuki * [addr...addr+count) includes some valid address, data is copied from
2764d0107eb0SKAMEZAWA Hiroyuki * the proper area of @buf. If there are memory holes, no copy is done into them.
2765d0107eb0SKAMEZAWA Hiroyuki * IOREMAP area is treated as memory hole and no copy is done.
2766d0107eb0SKAMEZAWA Hiroyuki *
2767d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with an alive
2768a8e5202dSCong Wang * vm_struct area, returns 0. @buf should be kernel's buffer.
2769d0107eb0SKAMEZAWA Hiroyuki *
2770d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vwrite() is never necessary because the caller
2771d0107eb0SKAMEZAWA Hiroyuki * should know vmalloc() area is valid and can use memcpy().
2772d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access vmalloc area without
2773d0107eb0SKAMEZAWA Hiroyuki * any information, such as /dev/kmem.
2774a862f68aSMike Rapoport *
2775a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be
2776a862f68aSMike Rapoport * increased (same number as @count) or %0 if [addr...addr+count)
2777a862f68aSMike Rapoport * doesn't include any intersection with valid vmalloc area
2778d0107eb0SKAMEZAWA Hiroyuki */
27791da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count)
27801da177e4SLinus Torvalds {
2781e81ce85fSJoonsoo Kim struct vmap_area *va;
2782e81ce85fSJoonsoo Kim struct vm_struct *vm;
2783d0107eb0SKAMEZAWA Hiroyuki char *vaddr;
2784d0107eb0SKAMEZAWA Hiroyuki unsigned long n, buflen;
2785d0107eb0SKAMEZAWA Hiroyuki int copied = 0;
27861da177e4SLinus Torvalds
27871da177e4SLinus Torvalds /* Don't allow overflow */
27881da177e4SLinus Torvalds if ((unsigned long) addr + count < count)
27891da177e4SLinus Torvalds count = -(unsigned long) addr;
2790d0107eb0SKAMEZAWA Hiroyuki buflen = count;
27911da177e4SLinus Torvalds
2792e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock);
2793e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) {
2794e81ce85fSJoonsoo Kim if (!count)
2795e81ce85fSJoonsoo Kim break;
2796e81ce85fSJoonsoo Kim
2797e81ce85fSJoonsoo Kim if (!(va->flags & VM_VM_AREA))
2798e81ce85fSJoonsoo Kim continue;
2799e81ce85fSJoonsoo Kim
2800e81ce85fSJoonsoo Kim vm = va->vm;
2801e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr;
2802762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm))
28031da177e4SLinus Torvalds continue;
28041da177e4SLinus Torvalds while (addr < vaddr) {
28051da177e4SLinus Torvalds if (count == 0)
28061da177e4SLinus Torvalds goto finished;
28071da177e4SLinus Torvalds buf++;
28081da177e4SLinus Torvalds addr++;
28091da177e4SLinus Torvalds count--;
28101da177e4SLinus Torvalds }
2811762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr;
2812d0107eb0SKAMEZAWA Hiroyuki if (n > count)
2813d0107eb0SKAMEZAWA Hiroyuki n = count;
2814e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) {
2815d0107eb0SKAMEZAWA Hiroyuki aligned_vwrite(buf, addr, n);
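			/*
			 * 'copied' is only used as a flag below: when it is
			 * still zero vwrite() returns 0, otherwise it returns
			 * buflen rather than the exact number of bytes
			 * written.
			 */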
2816d0107eb0SKAMEZAWA Hiroyuki copied++; 2817d0107eb0SKAMEZAWA Hiroyuki } 2818d0107eb0SKAMEZAWA Hiroyuki buf += n; 2819d0107eb0SKAMEZAWA Hiroyuki addr += n; 2820d0107eb0SKAMEZAWA Hiroyuki count -= n; 28211da177e4SLinus Torvalds } 28221da177e4SLinus Torvalds finished: 2823e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 2824d0107eb0SKAMEZAWA Hiroyuki if (!copied) 2825d0107eb0SKAMEZAWA Hiroyuki return 0; 2826d0107eb0SKAMEZAWA Hiroyuki return buflen; 28271da177e4SLinus Torvalds } 282883342314SNick Piggin 282983342314SNick Piggin /** 2830e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace 2831e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover 2832e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at 2833e69e9d4aSHATAYAMA Daisuke * @kaddr: virtual address of vmalloc kernel memory 2834e69e9d4aSHATAYAMA Daisuke * @size: size of map area 2835e69e9d4aSHATAYAMA Daisuke * 2836e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure 2837e69e9d4aSHATAYAMA Daisuke * 2838e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area, 2839e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at 2840e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if that criteria isn't 2841e69e9d4aSHATAYAMA Daisuke * met. 2842e69e9d4aSHATAYAMA Daisuke * 2843e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c) 2844e69e9d4aSHATAYAMA Daisuke */ 2845e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 2846e69e9d4aSHATAYAMA Daisuke void *kaddr, unsigned long size) 2847e69e9d4aSHATAYAMA Daisuke { 2848e69e9d4aSHATAYAMA Daisuke struct vm_struct *area; 2849e69e9d4aSHATAYAMA Daisuke 2850e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size); 2851e69e9d4aSHATAYAMA Daisuke 2852e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 2853e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2854e69e9d4aSHATAYAMA Daisuke 2855e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr); 2856e69e9d4aSHATAYAMA Daisuke if (!area) 2857e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2858e69e9d4aSHATAYAMA Daisuke 2859e69e9d4aSHATAYAMA Daisuke if (!(area->flags & VM_USERMAP)) 2860e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2861e69e9d4aSHATAYAMA Daisuke 2862401592d2SRoman Penyaev if (kaddr + size > area->addr + get_vm_area_size(area)) 2863e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2864e69e9d4aSHATAYAMA Daisuke 2865e69e9d4aSHATAYAMA Daisuke do { 2866e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr); 2867e69e9d4aSHATAYAMA Daisuke int ret; 2868e69e9d4aSHATAYAMA Daisuke 2869e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page); 2870e69e9d4aSHATAYAMA Daisuke if (ret) 2871e69e9d4aSHATAYAMA Daisuke return ret; 2872e69e9d4aSHATAYAMA Daisuke 2873e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE; 2874e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE; 2875e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE; 2876e69e9d4aSHATAYAMA Daisuke } while (size > 0); 2877e69e9d4aSHATAYAMA Daisuke 2878e69e9d4aSHATAYAMA Daisuke vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 2879e69e9d4aSHATAYAMA Daisuke 2880e69e9d4aSHATAYAMA Daisuke return 0; 2881e69e9d4aSHATAYAMA Daisuke } 2882e69e9d4aSHATAYAMA Daisuke EXPORT_SYMBOL(remap_vmalloc_range_partial); 2883e69e9d4aSHATAYAMA Daisuke 2884e69e9d4aSHATAYAMA Daisuke /** 288583342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 288683342314SNick Piggin * @vma: vma to cover (map full range of vma) 
288783342314SNick Piggin * @addr: vmalloc memory 288883342314SNick Piggin * @pgoff: number of pages into addr before first page to map 28897682486bSRandy Dunlap * 28907682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 289183342314SNick Piggin * 289283342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 289383342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 289483342314SNick Piggin * that criteria isn't met. 289583342314SNick Piggin * 289672fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 289783342314SNick Piggin */ 289883342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 289983342314SNick Piggin unsigned long pgoff) 290083342314SNick Piggin { 2901e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start, 2902e69e9d4aSHATAYAMA Daisuke addr + (pgoff << PAGE_SHIFT), 2903e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start); 290483342314SNick Piggin } 290583342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 290683342314SNick Piggin 29071eeb66a1SChristoph Hellwig /* 29081eeb66a1SChristoph Hellwig * Implement a stub for vmalloc_sync_all() if the architecture chose not to 29091eeb66a1SChristoph Hellwig * have one. 29101eeb66a1SChristoph Hellwig */ 29113b32123dSGideon Israel Dsouza void __weak vmalloc_sync_all(void) 29121eeb66a1SChristoph Hellwig { 29131eeb66a1SChristoph Hellwig } 29145f4352fbSJeremy Fitzhardinge 29155f4352fbSJeremy Fitzhardinge 29162f569afdSMartin Schwidefsky static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 29175f4352fbSJeremy Fitzhardinge { 2918cd12909cSDavid Vrabel pte_t ***p = data; 2919cd12909cSDavid Vrabel 2920cd12909cSDavid Vrabel if (p) { 2921cd12909cSDavid Vrabel *(*p) = pte; 2922cd12909cSDavid Vrabel (*p)++; 2923cd12909cSDavid Vrabel } 29245f4352fbSJeremy Fitzhardinge return 0; 29255f4352fbSJeremy Fitzhardinge } 29265f4352fbSJeremy Fitzhardinge 29275f4352fbSJeremy Fitzhardinge /** 29285f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 29295f4352fbSJeremy Fitzhardinge * @size: size of the area 2930cd12909cSDavid Vrabel * @ptes: returns the PTEs for the address space 29317682486bSRandy Dunlap * 29327682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 29335f4352fbSJeremy Fitzhardinge * 29345f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 29355f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 2936cd12909cSDavid Vrabel * are created. 2937cd12909cSDavid Vrabel * 2938cd12909cSDavid Vrabel * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 2939cd12909cSDavid Vrabel * allocated for the VM area are returned. 29405f4352fbSJeremy Fitzhardinge */ 2941cd12909cSDavid Vrabel struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) 29425f4352fbSJeremy Fitzhardinge { 29435f4352fbSJeremy Fitzhardinge struct vm_struct *area; 29445f4352fbSJeremy Fitzhardinge 294523016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 294623016969SChristoph Lameter __builtin_return_address(0)); 29475f4352fbSJeremy Fitzhardinge if (area == NULL) 29485f4352fbSJeremy Fitzhardinge return NULL; 29495f4352fbSJeremy Fitzhardinge 29505f4352fbSJeremy Fitzhardinge /* 29515f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 29525f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm. 
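	 *
	 * (The callback f() above installs no mappings itself;
	 * apply_to_page_range() allocates any missing page-table levels on
	 * the way down, and f() merely records a pointer to each PTE when
	 * @ptes was supplied by the caller.)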
29535f4352fbSJeremy Fitzhardinge */ 29545f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 2955cd12909cSDavid Vrabel size, f, ptes ? &ptes : NULL)) { 29565f4352fbSJeremy Fitzhardinge free_vm_area(area); 29575f4352fbSJeremy Fitzhardinge return NULL; 29585f4352fbSJeremy Fitzhardinge } 29595f4352fbSJeremy Fitzhardinge 29605f4352fbSJeremy Fitzhardinge return area; 29615f4352fbSJeremy Fitzhardinge } 29625f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 29635f4352fbSJeremy Fitzhardinge 29645f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 29655f4352fbSJeremy Fitzhardinge { 29665f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 29675f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 29685f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 29695f4352fbSJeremy Fitzhardinge kfree(area); 29705f4352fbSJeremy Fitzhardinge } 29715f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 2972a10aa579SChristoph Lameter 29734f8b02b4STejun Heo #ifdef CONFIG_SMP 2974ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 2975ca23e405STejun Heo { 29764583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node); 2977ca23e405STejun Heo } 2978ca23e405STejun Heo 2979ca23e405STejun Heo /** 2980*68ad4a33SUladzislau Rezki (Sony) * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 2981*68ad4a33SUladzislau Rezki (Sony) * @addr: target address 2982ca23e405STejun Heo * 2983*68ad4a33SUladzislau Rezki (Sony) * Returns: vmap_area if it is found. If there is no such area 2984*68ad4a33SUladzislau Rezki (Sony) * the first highest(reverse order) vmap_area is returned 2985*68ad4a33SUladzislau Rezki (Sony) * i.e. va->va_start < addr && va->va_end < addr or NULL 2986*68ad4a33SUladzislau Rezki (Sony) * if there are no any areas before @addr. 2987ca23e405STejun Heo */ 2988*68ad4a33SUladzislau Rezki (Sony) static struct vmap_area * 2989*68ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr) 2990ca23e405STejun Heo { 2991*68ad4a33SUladzislau Rezki (Sony) struct vmap_area *va, *tmp; 2992*68ad4a33SUladzislau Rezki (Sony) struct rb_node *n; 2993*68ad4a33SUladzislau Rezki (Sony) 2994*68ad4a33SUladzislau Rezki (Sony) n = free_vmap_area_root.rb_node; 2995*68ad4a33SUladzislau Rezki (Sony) va = NULL; 2996ca23e405STejun Heo 2997ca23e405STejun Heo while (n) { 2998*68ad4a33SUladzislau Rezki (Sony) tmp = rb_entry(n, struct vmap_area, rb_node); 2999*68ad4a33SUladzislau Rezki (Sony) if (tmp->va_start <= addr) { 3000*68ad4a33SUladzislau Rezki (Sony) va = tmp; 3001*68ad4a33SUladzislau Rezki (Sony) if (tmp->va_end >= addr) 3002ca23e405STejun Heo break; 3003ca23e405STejun Heo 3004*68ad4a33SUladzislau Rezki (Sony) n = n->rb_right; 3005ca23e405STejun Heo } else { 3006*68ad4a33SUladzislau Rezki (Sony) n = n->rb_left; 3007ca23e405STejun Heo } 3008*68ad4a33SUladzislau Rezki (Sony) } 3009*68ad4a33SUladzislau Rezki (Sony) 3010*68ad4a33SUladzislau Rezki (Sony) return va; 3011ca23e405STejun Heo } 3012ca23e405STejun Heo 3013ca23e405STejun Heo /** 3014*68ad4a33SUladzislau Rezki (Sony) * pvm_determine_end_from_reverse - find the highest aligned address 3015*68ad4a33SUladzislau Rezki (Sony) * of free block below VMALLOC_END 3016*68ad4a33SUladzislau Rezki (Sony) * @va: 3017*68ad4a33SUladzislau Rezki (Sony) * in - the VA we start the search(reverse order); 3018*68ad4a33SUladzislau Rezki (Sony) * out - the VA with the highest aligned end address. 
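 *
 * Example (illustrative): with align = PAGE_SIZE and a free block whose
 * va_end is 0x100234, the candidate end considered is 0x100000, i.e.
 * va_end rounded down to the alignment and clamped to VMALLOC_END.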
3019ca23e405STejun Heo * 3020*68ad4a33SUladzislau Rezki (Sony) * Returns: determined end address within vmap_area 3021ca23e405STejun Heo */ 3022*68ad4a33SUladzislau Rezki (Sony) static unsigned long 3023*68ad4a33SUladzislau Rezki (Sony) pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 3024ca23e405STejun Heo { 3025*68ad4a33SUladzislau Rezki (Sony) unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3026ca23e405STejun Heo unsigned long addr; 3027ca23e405STejun Heo 3028*68ad4a33SUladzislau Rezki (Sony) if (likely(*va)) { 3029*68ad4a33SUladzislau Rezki (Sony) list_for_each_entry_from_reverse((*va), 3030*68ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list, list) { 3031*68ad4a33SUladzislau Rezki (Sony) addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 3032*68ad4a33SUladzislau Rezki (Sony) if ((*va)->va_start < addr) 3033*68ad4a33SUladzislau Rezki (Sony) return addr; 3034*68ad4a33SUladzislau Rezki (Sony) } 3035ca23e405STejun Heo } 3036ca23e405STejun Heo 3037*68ad4a33SUladzislau Rezki (Sony) return 0; 3038ca23e405STejun Heo } 3039ca23e405STejun Heo 3040ca23e405STejun Heo /** 3041ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 3042ca23e405STejun Heo * @offsets: array containing offset of each area 3043ca23e405STejun Heo * @sizes: array containing size of each area 3044ca23e405STejun Heo * @nr_vms: the number of areas to allocate 3045ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this 3046ca23e405STejun Heo * 3047ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated 3048ca23e405STejun Heo * vm_structs on success, %NULL on failure 3049ca23e405STejun Heo * 3050ca23e405STejun Heo * Percpu allocator wants to use congruent vm areas so that it can 3051ca23e405STejun Heo * maintain the offsets among percpu areas. This function allocates 3052ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 3053ec3f64fcSDavid Rientjes * be scattered pretty far, distance between two areas easily going up 3054ec3f64fcSDavid Rientjes * to gigabytes. To avoid interacting with regular vmallocs, these 3055ec3f64fcSDavid Rientjes * areas are allocated from top. 3056ca23e405STejun Heo * 3057ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It 3058*68ad4a33SUladzislau Rezki (Sony) * does everything top-down and scans free blocks from the end looking 3059*68ad4a33SUladzislau Rezki (Sony) * for matching base. While scanning, if any of the areas do not fit the 3060*68ad4a33SUladzislau Rezki (Sony) * base address is pulled down to fit the area. Scanning is repeated till 3061*68ad4a33SUladzislau Rezki (Sony) * all the areas fit and then all necessary data structures are inserted 3062*68ad4a33SUladzislau Rezki (Sony) * and the result is returned. 
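 *
 * Example (an illustrative sketch, not part of the original source): to
 * reserve two congruent 16KB areas spaced 1MB apart within each chunk,
 * the caller would pass something like:
 *
 *	unsigned long offsets[] = { 0, 1UL << 20 };
 *	size_t sizes[] = { 16 << 10, 16 << 10 };
 *	struct vm_struct **vms =
 *		pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);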
3063ca23e405STejun Heo */ 3064ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 3065ca23e405STejun Heo const size_t *sizes, int nr_vms, 3066ec3f64fcSDavid Rientjes size_t align) 3067ca23e405STejun Heo { 3068ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 3069ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3070*68ad4a33SUladzislau Rezki (Sony) struct vmap_area **vas, *va; 3071ca23e405STejun Heo struct vm_struct **vms; 3072ca23e405STejun Heo int area, area2, last_area, term_area; 3073*68ad4a33SUladzislau Rezki (Sony) unsigned long base, start, size, end, last_end; 3074ca23e405STejun Heo bool purged = false; 3075*68ad4a33SUladzislau Rezki (Sony) enum fit_type type; 3076ca23e405STejun Heo 3077ca23e405STejun Heo /* verify parameters and allocate data structures */ 3078891c49abSAlexander Kuleshov BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 3079ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 3080ca23e405STejun Heo start = offsets[area]; 3081ca23e405STejun Heo end = start + sizes[area]; 3082ca23e405STejun Heo 3083ca23e405STejun Heo /* is everything aligned properly? */ 3084ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 3085ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 3086ca23e405STejun Heo 3087ca23e405STejun Heo /* detect the area with the highest address */ 3088ca23e405STejun Heo if (start > offsets[last_area]) 3089ca23e405STejun Heo last_area = area; 3090ca23e405STejun Heo 3091c568da28SWei Yang for (area2 = area + 1; area2 < nr_vms; area2++) { 3092ca23e405STejun Heo unsigned long start2 = offsets[area2]; 3093ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 3094ca23e405STejun Heo 3095c568da28SWei Yang BUG_ON(start2 < end && start < end2); 3096ca23e405STejun Heo } 3097ca23e405STejun Heo } 3098ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 3099ca23e405STejun Heo 3100ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 3101ca23e405STejun Heo WARN_ON(true); 3102ca23e405STejun Heo return NULL; 3103ca23e405STejun Heo } 3104ca23e405STejun Heo 31054d67d860SThomas Meyer vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 31064d67d860SThomas Meyer vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 3107ca23e405STejun Heo if (!vas || !vms) 3108f1db7afdSKautuk Consul goto err_free2; 3109ca23e405STejun Heo 3110ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 3111*68ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 3112ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 3113ca23e405STejun Heo if (!vas[area] || !vms[area]) 3114ca23e405STejun Heo goto err_free; 3115ca23e405STejun Heo } 3116ca23e405STejun Heo retry: 3117ca23e405STejun Heo spin_lock(&vmap_area_lock); 3118ca23e405STejun Heo 3119ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 3120ca23e405STejun Heo area = term_area = last_area; 3121ca23e405STejun Heo start = offsets[area]; 3122ca23e405STejun Heo end = start + sizes[area]; 3123ca23e405STejun Heo 3124*68ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(vmalloc_end); 3125*68ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 3126ca23e405STejun Heo 3127ca23e405STejun Heo while (true) { 3128ca23e405STejun Heo /* 3129ca23e405STejun Heo * base might have underflowed, add last_end before 3130ca23e405STejun Heo * comparing. 
3131ca23e405STejun Heo */ 3132*68ad4a33SUladzislau Rezki (Sony) if (base + last_end < vmalloc_start + last_end) 3133*68ad4a33SUladzislau Rezki (Sony) goto overflow; 3134ca23e405STejun Heo 3135ca23e405STejun Heo /* 3136*68ad4a33SUladzislau Rezki (Sony) * Fitting base has not been found. 3137ca23e405STejun Heo */ 3138*68ad4a33SUladzislau Rezki (Sony) if (va == NULL) 3139*68ad4a33SUladzislau Rezki (Sony) goto overflow; 3140ca23e405STejun Heo 3141ca23e405STejun Heo /* 3142*68ad4a33SUladzislau Rezki (Sony) * If this VA does not fit, move base downwards and recheck. 3143ca23e405STejun Heo */ 3144*68ad4a33SUladzislau Rezki (Sony) if (base + start < va->va_start || base + end > va->va_end) { 3145*68ad4a33SUladzislau Rezki (Sony) va = node_to_va(rb_prev(&va->rb_node)); 3146*68ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 3147ca23e405STejun Heo term_area = area; 3148ca23e405STejun Heo continue; 3149ca23e405STejun Heo } 3150ca23e405STejun Heo 3151ca23e405STejun Heo /* 3152ca23e405STejun Heo * This area fits, move on to the previous one. If 3153ca23e405STejun Heo * the previous one is the terminal one, we're done. 3154ca23e405STejun Heo */ 3155ca23e405STejun Heo area = (area + nr_vms - 1) % nr_vms; 3156ca23e405STejun Heo if (area == term_area) 3157ca23e405STejun Heo break; 3158*68ad4a33SUladzislau Rezki (Sony) 3159ca23e405STejun Heo start = offsets[area]; 3160ca23e405STejun Heo end = start + sizes[area]; 3161*68ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(base + end); 3162ca23e405STejun Heo } 3163*68ad4a33SUladzislau Rezki (Sony) 3164ca23e405STejun Heo /* we've found a fitting base, insert all va's */ 3165ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 3166*68ad4a33SUladzislau Rezki (Sony) int ret; 3167ca23e405STejun Heo 3168*68ad4a33SUladzislau Rezki (Sony) start = base + offsets[area]; 3169*68ad4a33SUladzislau Rezki (Sony) size = sizes[area]; 3170*68ad4a33SUladzislau Rezki (Sony) 3171*68ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(start); 3172*68ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(va == NULL)) 3173*68ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 3174*68ad4a33SUladzislau Rezki (Sony) goto recovery; 3175*68ad4a33SUladzislau Rezki (Sony) 3176*68ad4a33SUladzislau Rezki (Sony) type = classify_va_fit_type(va, start, size); 3177*68ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(type == NOTHING_FIT)) 3178*68ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 3179*68ad4a33SUladzislau Rezki (Sony) goto recovery; 3180*68ad4a33SUladzislau Rezki (Sony) 3181*68ad4a33SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(va, start, size, type); 3182*68ad4a33SUladzislau Rezki (Sony) if (unlikely(ret)) 3183*68ad4a33SUladzislau Rezki (Sony) goto recovery; 3184*68ad4a33SUladzislau Rezki (Sony) 3185*68ad4a33SUladzislau Rezki (Sony) /* Allocated area. 
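	   The vmap_area preallocated in vas[] is reused here: it is given
	   the range just carved out of the free tree and then inserted
	   into the busy tree and list.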
*/ 3186*68ad4a33SUladzislau Rezki (Sony) va = vas[area]; 3187*68ad4a33SUladzislau Rezki (Sony) va->va_start = start; 3188*68ad4a33SUladzislau Rezki (Sony) va->va_end = start + size; 3189*68ad4a33SUladzislau Rezki (Sony) 3190*68ad4a33SUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 3191ca23e405STejun Heo } 3192ca23e405STejun Heo 3193ca23e405STejun Heo spin_unlock(&vmap_area_lock); 3194ca23e405STejun Heo 3195ca23e405STejun Heo /* insert all vm's */ 3196ca23e405STejun Heo for (area = 0; area < nr_vms; area++) 31973645cb4aSZhang Yanfei setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, 3198ca23e405STejun Heo pcpu_get_vm_areas); 3199ca23e405STejun Heo 3200ca23e405STejun Heo kfree(vas); 3201ca23e405STejun Heo return vms; 3202ca23e405STejun Heo 3203*68ad4a33SUladzislau Rezki (Sony) recovery: 3204*68ad4a33SUladzislau Rezki (Sony) /* Remove previously inserted areas. */ 3205*68ad4a33SUladzislau Rezki (Sony) while (area--) { 3206*68ad4a33SUladzislau Rezki (Sony) __free_vmap_area(vas[area]); 3207*68ad4a33SUladzislau Rezki (Sony) vas[area] = NULL; 3208*68ad4a33SUladzislau Rezki (Sony) } 3209*68ad4a33SUladzislau Rezki (Sony) 3210*68ad4a33SUladzislau Rezki (Sony) overflow: 3211*68ad4a33SUladzislau Rezki (Sony) spin_unlock(&vmap_area_lock); 3212*68ad4a33SUladzislau Rezki (Sony) if (!purged) { 3213*68ad4a33SUladzislau Rezki (Sony) purge_vmap_area_lazy(); 3214*68ad4a33SUladzislau Rezki (Sony) purged = true; 3215*68ad4a33SUladzislau Rezki (Sony) 3216*68ad4a33SUladzislau Rezki (Sony) /* Before "retry", check if we recover. */ 3217*68ad4a33SUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 3218*68ad4a33SUladzislau Rezki (Sony) if (vas[area]) 3219*68ad4a33SUladzislau Rezki (Sony) continue; 3220*68ad4a33SUladzislau Rezki (Sony) 3221*68ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc( 3222*68ad4a33SUladzislau Rezki (Sony) vmap_area_cachep, GFP_KERNEL); 3223*68ad4a33SUladzislau Rezki (Sony) if (!vas[area]) 3224*68ad4a33SUladzislau Rezki (Sony) goto err_free; 3225*68ad4a33SUladzislau Rezki (Sony) } 3226*68ad4a33SUladzislau Rezki (Sony) 3227*68ad4a33SUladzislau Rezki (Sony) goto retry; 3228*68ad4a33SUladzislau Rezki (Sony) } 3229*68ad4a33SUladzislau Rezki (Sony) 3230ca23e405STejun Heo err_free: 3231ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 3232*68ad4a33SUladzislau Rezki (Sony) if (vas[area]) 3233*68ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, vas[area]); 3234*68ad4a33SUladzislau Rezki (Sony) 3235ca23e405STejun Heo kfree(vms[area]); 3236ca23e405STejun Heo } 3237f1db7afdSKautuk Consul err_free2: 3238ca23e405STejun Heo kfree(vas); 3239ca23e405STejun Heo kfree(vms); 3240ca23e405STejun Heo return NULL; 3241ca23e405STejun Heo } 3242ca23e405STejun Heo 3243ca23e405STejun Heo /** 3244ca23e405STejun Heo * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 3245ca23e405STejun Heo * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 3246ca23e405STejun Heo * @nr_vms: the number of allocated areas 3247ca23e405STejun Heo * 3248ca23e405STejun Heo * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
3249ca23e405STejun Heo  */
3250ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3251ca23e405STejun Heo {
3252ca23e405STejun Heo int i;
3253ca23e405STejun Heo 
3254ca23e405STejun Heo for (i = 0; i < nr_vms; i++)
3255ca23e405STejun Heo free_vm_area(vms[i]);
3256ca23e405STejun Heo kfree(vms);
3257ca23e405STejun Heo }
32584f8b02b4STejun Heo #endif /* CONFIG_SMP */
3259a10aa579SChristoph Lameter 
3260a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS
3261a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos)
3262d4033afdSJoonsoo Kim __acquires(&vmap_area_lock)
3263a10aa579SChristoph Lameter {
3264d4033afdSJoonsoo Kim spin_lock(&vmap_area_lock);
32653f500069Szijun_hu return seq_list_start(&vmap_area_list, *pos);
3266a10aa579SChristoph Lameter }
3267a10aa579SChristoph Lameter 
3268a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3269a10aa579SChristoph Lameter {
32703f500069Szijun_hu return seq_list_next(p, &vmap_area_list, pos);
3271a10aa579SChristoph Lameter }
3272a10aa579SChristoph Lameter 
3273a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p)
3274d4033afdSJoonsoo Kim __releases(&vmap_area_lock)
3275a10aa579SChristoph Lameter {
3276d4033afdSJoonsoo Kim spin_unlock(&vmap_area_lock);
3277a10aa579SChristoph Lameter }
3278a10aa579SChristoph Lameter 
3279a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3280a47a126aSEric Dumazet {
3281e5adfffcSKirill A. Shutemov if (IS_ENABLED(CONFIG_NUMA)) {
3282a47a126aSEric Dumazet unsigned int nr, *counters = m->private;
3283a47a126aSEric Dumazet 
3284a47a126aSEric Dumazet if (!counters)
3285a47a126aSEric Dumazet return;
3286a47a126aSEric Dumazet 
3287af12346cSWanpeng Li if (v->flags & VM_UNINITIALIZED)
3288af12346cSWanpeng Li return;
32897e5b528bSDmitry Vyukov /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
32907e5b528bSDmitry Vyukov smp_rmb();
3291af12346cSWanpeng Li 
3292a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3293a47a126aSEric Dumazet 
3294a47a126aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr++)
3295a47a126aSEric Dumazet counters[page_to_nid(v->pages[nr])]++;
3296a47a126aSEric Dumazet 
3297a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY)
3298a47a126aSEric Dumazet if (counters[nr])
3299a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]);
3300a47a126aSEric Dumazet }
3301a47a126aSEric Dumazet }
3302a47a126aSEric Dumazet 
3303a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p)
3304a10aa579SChristoph Lameter {
33053f500069Szijun_hu struct vmap_area *va;
3306d4033afdSJoonsoo Kim struct vm_struct *v;
3307d4033afdSJoonsoo Kim 
33083f500069Szijun_hu va = list_entry(p, struct vmap_area, list);
33093f500069Szijun_hu 
3310c2ce8c14SWanpeng Li /*
3311c2ce8c14SWanpeng Li  * s_show can race with remove_vm_area(): !VM_VM_AREA means the vmap
3312c2ce8c14SWanpeng Li  * area is being torn down or belongs to a vm_map_ram allocation.
3313c2ce8c14SWanpeng Li  */
331478c72746SYisheng Xie if (!(va->flags & VM_VM_AREA)) {
331578c72746SYisheng Xie seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
331678c72746SYisheng Xie (void *)va->va_start, (void *)va->va_end,
331778c72746SYisheng Xie va->va_end - va->va_start,
331878c72746SYisheng Xie va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
331978c72746SYisheng Xie 
3320d4033afdSJoonsoo Kim return 0;
332178c72746SYisheng Xie }
3322d4033afdSJoonsoo Kim 
3323d4033afdSJoonsoo Kim v = va->vm;
3324a10aa579SChristoph Lameter 
332545ec1690SKees Cook seq_printf(m, "0x%pK-0x%pK %7ld",
3326a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size);
3327a10aa579SChristoph Lameter 
332862c70bceSJoe Perches if (v->caller)
332962c70bceSJoe Perches seq_printf(m, " %pS", v->caller);
333023016969SChristoph Lameter 
3331a10aa579SChristoph Lameter if (v->nr_pages)
3332a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages);
3333a10aa579SChristoph Lameter 
3334a10aa579SChristoph Lameter if (v->phys_addr)
3335199eaa05SMiles Chen seq_printf(m, " phys=%pa", &v->phys_addr);
3336a10aa579SChristoph Lameter 
3337a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP)
3338f4527c90SFabian Frederick seq_puts(m, " ioremap");
3339a10aa579SChristoph Lameter 
3340a10aa579SChristoph Lameter if (v->flags & VM_ALLOC)
3341f4527c90SFabian Frederick seq_puts(m, " vmalloc");
3342a10aa579SChristoph Lameter 
3343a10aa579SChristoph Lameter if (v->flags & VM_MAP)
3344f4527c90SFabian Frederick seq_puts(m, " vmap");
3345a10aa579SChristoph Lameter 
3346a10aa579SChristoph Lameter if (v->flags & VM_USERMAP)
3347f4527c90SFabian Frederick seq_puts(m, " user");
3348a10aa579SChristoph Lameter 
3349244d63eeSDavid Rientjes if (is_vmalloc_addr(v->pages))
3350f4527c90SFabian Frederick seq_puts(m, " vpages");
3351a10aa579SChristoph Lameter 
3352a47a126aSEric Dumazet show_numa_info(m, v);
3353a10aa579SChristoph Lameter seq_putc(m, '\n');
3354a10aa579SChristoph Lameter return 0;
3355a10aa579SChristoph Lameter }
3356a10aa579SChristoph Lameter 
33575f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = {
3358a10aa579SChristoph Lameter .start = s_start,
3359a10aa579SChristoph Lameter .next = s_next,
3360a10aa579SChristoph Lameter .stop = s_stop,
3361a10aa579SChristoph Lameter .show = s_show,
3362a10aa579SChristoph Lameter };
33635f6a6a9cSAlexey Dobriyan 
33645f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void)
33655f6a6a9cSAlexey Dobriyan {
3366fddda2b7SChristoph Hellwig if (IS_ENABLED(CONFIG_NUMA))
33670825a6f9SJoe Perches proc_create_seq_private("vmallocinfo", 0400, NULL,
336844414d82SChristoph Hellwig &vmalloc_op,
336944414d82SChristoph Hellwig nr_node_ids * sizeof(unsigned int), NULL);
3370fddda2b7SChristoph Hellwig else
33710825a6f9SJoe Perches proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
33725f6a6a9cSAlexey Dobriyan return 0;
33735f6a6a9cSAlexey Dobriyan }
33745f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init);
3375db3808c1SJoonsoo Kim 
3376a10aa579SChristoph Lameter #endif
3377