/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;
		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
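
/*
 * Worked example (illustrative only; the caller, the kva range and the
 * "pages" array below are hypothetical): the contract documented above for
 * vmap_page_range() is that pages[N] becomes visible at addr + N * PAGE_SIZE.
 * For a caller that already owns the range [addr, addr + 3 * PAGE_SIZE) and
 * holds three struct page pointers:
 *
 *	if (vmap_page_range(addr, addr + 3 * PAGE_SIZE, PAGE_KERNEL, pages) < 0)
 *		return -ENOMEM;
 *	// vmalloc_to_page() below walks the tables back to the same page:
 *	BUG_ON(vmalloc_to_page((void *)(addr + 2 * PAGE_SIZE)) != pages[2]);
 */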
/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	might_sleep();

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_next_entry(first, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

	if (printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if pagealloc
	 * debugging is enabled.  This catches use after free bugs similarly to
	 * those in linear kernel virtual address space after a page has been
	 * freed.
	 *
	 * All the lazy freeing logic is still retained, in order to minimise
	 * intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address debugging
	 * doesn't do a broadcast TLB flush so it is a lot faster).
	 */
	if (debug_pagealloc_enabled()) {
		vunmap_page_range(start, end);
		flush_tlb_kernel_range(start, end);
	}
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
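
/*
 * Worked example (illustrative only, assuming 4K pages): with 16 online
 * CPUs, fls(16) == 5, so lazy_max_pages() returns 5 * (32MB / 4KB) ==
 * 40960 pages, i.e. up to ~160MB worth of lazily freed areas may
 * accumulate before a purge and its global TLB flush are triggered.
 */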

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Serialize vmap purging.  There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	struct llist_node *valist;
	struct vmap_area *va;
	struct vmap_area *n_va;
	bool do_free = false;

	lockdep_assert_held(&vmap_purge_lock);

	valist = llist_del_all(&vmap_purge_list);
	llist_for_each_entry(va, valist, purge_list) {
		if (va->va_start < start)
			start = va->va_start;
		if (va->va_end > end)
			end = va->va_end;
		do_free = true;
	}

	if (!do_free)
		return false;

	flush_tlb_kernel_range(start, end);

	spin_lock(&vmap_area_lock);
	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
		int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;

		__free_vmap_area(va);
		atomic_sub(nr, &vmap_lazy_nr);
		cond_resched_lock(&vmap_area_lock);
	}
	spin_unlock(&vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	int nr_lazy;

	nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
				    &vmap_lazy_nr);

	/* After this point, we may free va at any time */
	llist_add(&va->purge_list, &vmap_purge_list);

	if (unlikely(nr_lazy > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
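
/*
 * Worked example (illustrative only): on a 64-bit build with 4K pages and
 * NR_CPUS == 64, VMALLOC_PAGES is 128GB / 4KB == 32M pages, and
 * 32M / 64 / 16 == 32768 bits, which is clamped to VMAP_BBMAP_BITS_MAX.
 * That gives VMAP_BBMAP_BITS == 1024 and VMAP_BLOCK_SIZE == 4MB, while
 * VMAP_MAX_ALLOC == 64 pages (256KB) is the largest request that
 * vm_map_ram() will serve from a per-cpu block via vb_alloc() rather than
 * through alloc_vmap_area().
 */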

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. The number of pages can't exceed VMAP_BBMAP_BITS.
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
	offset >>= PAGE_SHIFT;

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	might_sleep();

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
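
/*
 * Usage sketch (illustrative only; "buf" and the surrounding driver code
 * are hypothetical): a caller that is about to change the attributes of,
 * or give away, pages that may still have lazily kept vmap aliases:
 *
 *	vunmap(buf);		// or vfree()/vm_unmap_ram() as appropriate
 *	vm_unmap_aliases();	// no stale kernel aliases remain after this
 *	// ...now safe to repurpose or re-attribute the underlying pages...
 */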

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;
	struct vmap_area *va;

	might_sleep();
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC)) {
		vb_free(mem, size);
		return;
	}

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
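
/*
 * Usage sketch (illustrative only; "pages", "nr" and "src" are hypothetical
 * caller state): a short-lived mapping created with vm_map_ram() below must
 * be torn down with vm_unmap_ram() using the same page count:
 *
 *	void *buf = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(buf, nr);
 */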

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);

static struct vm_struct *vmlist __initdata;

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}
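
/*
 * Usage sketch (illustrative only; "early_vm" and FIXED_KVA_START are
 * hypothetical): an architecture reserving a fixed range during early
 * boot, before vmalloc_init() runs:
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.addr  = (void *)FIXED_KVA_START;
 *	early_vm.size  = SZ_1M;
 *	early_vm.flags = VM_IOREMAP;
 *	vm_area_add_early(&early_vm);
 */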
1189f0aa6617STejun Heo * 1190f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1191f0aa6617STejun Heo */ 1192c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align) 1193f0aa6617STejun Heo { 1194f0aa6617STejun Heo static size_t vm_init_off __initdata; 1195c0c0a293STejun Heo unsigned long addr; 1196f0aa6617STejun Heo 1197c0c0a293STejun Heo addr = ALIGN(VMALLOC_START + vm_init_off, align); 1198c0c0a293STejun Heo vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; 1199c0c0a293STejun Heo 1200c0c0a293STejun Heo vm->addr = (void *)addr; 1201f0aa6617STejun Heo 1202be9b7335SNicolas Pitre vm_area_add_early(vm); 1203f0aa6617STejun Heo } 1204f0aa6617STejun Heo 1205db64fe02SNick Piggin void __init vmalloc_init(void) 1206db64fe02SNick Piggin { 1207822c18f2SIvan Kokshaysky struct vmap_area *va; 1208822c18f2SIvan Kokshaysky struct vm_struct *tmp; 1209db64fe02SNick Piggin int i; 1210db64fe02SNick Piggin 1211db64fe02SNick Piggin for_each_possible_cpu(i) { 1212db64fe02SNick Piggin struct vmap_block_queue *vbq; 121332fcfd40SAl Viro struct vfree_deferred *p; 1214db64fe02SNick Piggin 1215db64fe02SNick Piggin vbq = &per_cpu(vmap_block_queue, i); 1216db64fe02SNick Piggin spin_lock_init(&vbq->lock); 1217db64fe02SNick Piggin INIT_LIST_HEAD(&vbq->free); 121832fcfd40SAl Viro p = &per_cpu(vfree_deferred, i); 121932fcfd40SAl Viro init_llist_head(&p->list); 122032fcfd40SAl Viro INIT_WORK(&p->wq, free_work); 1221db64fe02SNick Piggin } 12229b463334SJeremy Fitzhardinge 1223822c18f2SIvan Kokshaysky /* Import existing vmlist entries. */ 1224822c18f2SIvan Kokshaysky for (tmp = vmlist; tmp; tmp = tmp->next) { 122543ebdac4SPekka Enberg va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT); 1226dbda591dSKyongHo va->flags = VM_VM_AREA; 1227822c18f2SIvan Kokshaysky va->va_start = (unsigned long)tmp->addr; 1228822c18f2SIvan Kokshaysky va->va_end = va->va_start + tmp->size; 1229dbda591dSKyongHo va->vm = tmp; 1230822c18f2SIvan Kokshaysky __insert_vmap_area(va); 1231822c18f2SIvan Kokshaysky } 1232ca23e405STejun Heo 1233ca23e405STejun Heo vmap_area_pcpu_hole = VMALLOC_END; 1234ca23e405STejun Heo 12359b463334SJeremy Fitzhardinge vmap_initialized = true; 1236db64fe02SNick Piggin } 1237db64fe02SNick Piggin 12388fc48985STejun Heo /** 12398fc48985STejun Heo * map_kernel_range_noflush - map kernel VM area with the specified pages 12408fc48985STejun Heo * @addr: start of the VM area to map 12418fc48985STejun Heo * @size: size of the VM area to map 12428fc48985STejun Heo * @prot: page protection flags to use 12438fc48985STejun Heo * @pages: pages to map 12448fc48985STejun Heo * 12458fc48985STejun Heo * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size 12468fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 12478fc48985STejun Heo * friends. 12488fc48985STejun Heo * 12498fc48985STejun Heo * NOTE: 12508fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 12518fc48985STejun Heo * responsible for calling flush_cache_vmap() on to-be-mapped areas 12528fc48985STejun Heo * before calling this function. 12538fc48985STejun Heo * 12548fc48985STejun Heo * RETURNS: 12558fc48985STejun Heo * The number of pages mapped on success, -errno on failure. 
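 *
 * A hedged usage sketch (illustrative only; 'pages' and 'npages' are
 * caller-provided, and failure handling is omitted):
 *
 *	unsigned long size = (unsigned long)npages << PAGE_SHIFT;
 *	struct vm_struct *area = get_vm_area(size, VM_MAP);
 *	unsigned long addr = (unsigned long)area->addr;
 *
 *	map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
 *	... plus the cache flushing described in the NOTE above ...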
12568fc48985STejun Heo */ 12578fc48985STejun Heo int map_kernel_range_noflush(unsigned long addr, unsigned long size, 12588fc48985STejun Heo pgprot_t prot, struct page **pages) 12598fc48985STejun Heo { 12608fc48985STejun Heo return vmap_page_range_noflush(addr, addr + size, prot, pages); 12618fc48985STejun Heo } 12628fc48985STejun Heo 12638fc48985STejun Heo /** 12648fc48985STejun Heo * unmap_kernel_range_noflush - unmap kernel VM area 12658fc48985STejun Heo * @addr: start of the VM area to unmap 12668fc48985STejun Heo * @size: size of the VM area to unmap 12678fc48985STejun Heo * 12688fc48985STejun Heo * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size 12698fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 12708fc48985STejun Heo * friends. 12718fc48985STejun Heo * 12728fc48985STejun Heo * NOTE: 12738fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 12748fc48985STejun Heo * responsible for calling flush_cache_vunmap() on to-be-mapped areas 12758fc48985STejun Heo * before calling this function and flush_tlb_kernel_range() after. 12768fc48985STejun Heo */ 12778fc48985STejun Heo void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) 12788fc48985STejun Heo { 12798fc48985STejun Heo vunmap_page_range(addr, addr + size); 12808fc48985STejun Heo } 128181e88fdcSHuang Ying EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush); 12828fc48985STejun Heo 12838fc48985STejun Heo /** 12848fc48985STejun Heo * unmap_kernel_range - unmap kernel VM area and flush cache and TLB 12858fc48985STejun Heo * @addr: start of the VM area to unmap 12868fc48985STejun Heo * @size: size of the VM area to unmap 12878fc48985STejun Heo * 12888fc48985STejun Heo * Similar to unmap_kernel_range_noflush() but flushes vcache before 12898fc48985STejun Heo * the unmapping and tlb after. 12908fc48985STejun Heo */ 1291db64fe02SNick Piggin void unmap_kernel_range(unsigned long addr, unsigned long size) 1292db64fe02SNick Piggin { 1293db64fe02SNick Piggin unsigned long end = addr + size; 1294f6fcba70STejun Heo 1295f6fcba70STejun Heo flush_cache_vunmap(addr, end); 1296db64fe02SNick Piggin vunmap_page_range(addr, end); 1297db64fe02SNick Piggin flush_tlb_kernel_range(addr, end); 1298db64fe02SNick Piggin } 129993ef6d6cSMinchan Kim EXPORT_SYMBOL_GPL(unmap_kernel_range); 1300db64fe02SNick Piggin 1301f6f8ed47SWANG Chao int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) 1302db64fe02SNick Piggin { 1303db64fe02SNick Piggin unsigned long addr = (unsigned long)area->addr; 1304762216abSWanpeng Li unsigned long end = addr + get_vm_area_size(area); 1305db64fe02SNick Piggin int err; 1306db64fe02SNick Piggin 1307f6f8ed47SWANG Chao err = vmap_page_range(addr, end, prot, pages); 1308db64fe02SNick Piggin 1309f6f8ed47SWANG Chao return err > 0 ? 
0 : err; 1310db64fe02SNick Piggin } 1311db64fe02SNick Piggin EXPORT_SYMBOL_GPL(map_vm_area); 1312db64fe02SNick Piggin 1313f5252e00SMitsuo Hayasaka static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 13145e6cafc8SMarek Szyprowski unsigned long flags, const void *caller) 1315cf88c790STejun Heo { 1316c69480adSJoonsoo Kim spin_lock(&vmap_area_lock); 1317cf88c790STejun Heo vm->flags = flags; 1318cf88c790STejun Heo vm->addr = (void *)va->va_start; 1319cf88c790STejun Heo vm->size = va->va_end - va->va_start; 1320cf88c790STejun Heo vm->caller = caller; 1321db1aecafSMinchan Kim va->vm = vm; 1322cf88c790STejun Heo va->flags |= VM_VM_AREA; 1323c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 1324f5252e00SMitsuo Hayasaka } 1325cf88c790STejun Heo 132620fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm) 1327f5252e00SMitsuo Hayasaka { 1328d4033afdSJoonsoo Kim /* 132920fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED, 1330d4033afdSJoonsoo Kim * we should make sure that vm has proper values. 1331d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info(). 1332d4033afdSJoonsoo Kim */ 1333d4033afdSJoonsoo Kim smp_wmb(); 133420fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED; 1335cf88c790STejun Heo } 1336cf88c790STejun Heo 1337db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size, 13382dca6999SDavid Miller unsigned long align, unsigned long flags, unsigned long start, 13395e6cafc8SMarek Szyprowski unsigned long end, int node, gfp_t gfp_mask, const void *caller) 1340db64fe02SNick Piggin { 13410006526dSKautuk Consul struct vmap_area *va; 1342db64fe02SNick Piggin struct vm_struct *area; 13431da177e4SLinus Torvalds 134452fd24caSGiridhar Pemmasani BUG_ON(in_interrupt()); 13451da177e4SLinus Torvalds size = PAGE_ALIGN(size); 134631be8309SOGAWA Hirofumi if (unlikely(!size)) 134731be8309SOGAWA Hirofumi return NULL; 13481da177e4SLinus Torvalds 1349252e5c6eSzijun_hu if (flags & VM_IOREMAP) 1350252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size), 1351252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER); 1352252e5c6eSzijun_hu 1353cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 13541da177e4SLinus Torvalds if (unlikely(!area)) 13551da177e4SLinus Torvalds return NULL; 13561da177e4SLinus Torvalds 135771394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD)) 13581da177e4SLinus Torvalds size += PAGE_SIZE; 13591da177e4SLinus Torvalds 1360db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask); 1361db64fe02SNick Piggin if (IS_ERR(va)) { 1362db64fe02SNick Piggin kfree(area); 1363db64fe02SNick Piggin return NULL; 13641da177e4SLinus Torvalds } 13651da177e4SLinus Torvalds 1366f5252e00SMitsuo Hayasaka setup_vmalloc_vm(area, va, flags, caller); 1367f5252e00SMitsuo Hayasaka 13681da177e4SLinus Torvalds return area; 13691da177e4SLinus Torvalds } 13701da177e4SLinus Torvalds 1371930fc45aSChristoph Lameter struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 1372930fc45aSChristoph Lameter unsigned long start, unsigned long end) 1373930fc45aSChristoph Lameter { 137400ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, 137500ef2d2fSDavid Rientjes GFP_KERNEL, __builtin_return_address(0)); 1376930fc45aSChristoph Lameter } 13775992b6daSRusty Russell EXPORT_SYMBOL_GPL(__get_vm_area); 1378930fc45aSChristoph Lameter 1379c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned 
long flags,
1380c2968612SBenjamin Herrenschmidt 				unsigned long start, unsigned long end,
13815e6cafc8SMarek Szyprowski 				const void *caller)
1382c2968612SBenjamin Herrenschmidt {
138300ef2d2fSDavid Rientjes 	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
138400ef2d2fSDavid Rientjes 				  GFP_KERNEL, caller);
1385c2968612SBenjamin Herrenschmidt }
1386c2968612SBenjamin Herrenschmidt 
13871da177e4SLinus Torvalds /**
1388183ff22bSSimon Arlott  * get_vm_area  -  reserve a contiguous kernel virtual area
13891da177e4SLinus Torvalds  * @size:	 size of the area
13901da177e4SLinus Torvalds  * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
13911da177e4SLinus Torvalds  *
13921da177e4SLinus Torvalds  * Search for an area of @size in the kernel virtual mapping area,
13931da177e4SLinus Torvalds  * and reserve it for our purposes.  Returns the area descriptor
13941da177e4SLinus Torvalds  * on success or %NULL on failure.
13951da177e4SLinus Torvalds  */
13961da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
13971da177e4SLinus Torvalds {
13982dca6999SDavid Miller 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
139900ef2d2fSDavid Rientjes 				  NUMA_NO_NODE, GFP_KERNEL,
140000ef2d2fSDavid Rientjes 				  __builtin_return_address(0));
140123016969SChristoph Lameter }
140223016969SChristoph Lameter 
140323016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
14045e6cafc8SMarek Szyprowski 				const void *caller)
140523016969SChristoph Lameter {
14062dca6999SDavid Miller 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
140700ef2d2fSDavid Rientjes 				  NUMA_NO_NODE, GFP_KERNEL, caller);
14081da177e4SLinus Torvalds }
14091da177e4SLinus Torvalds 
1410e9da6e99SMarek Szyprowski /**
1411e9da6e99SMarek Szyprowski  * find_vm_area  -  find a contiguous kernel virtual area
1412e9da6e99SMarek Szyprowski  * @addr:	  base address
1413e9da6e99SMarek Szyprowski  *
1414e9da6e99SMarek Szyprowski  * Search for the kernel VM area starting at @addr, and return it.
1415e9da6e99SMarek Szyprowski  * It is up to the caller to do all required locking to keep the returned
1416e9da6e99SMarek Szyprowski  * pointer valid.
1417e9da6e99SMarek Szyprowski  */
1418e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr)
141983342314SNick Piggin {
1420db64fe02SNick Piggin 	struct vmap_area *va;
142183342314SNick Piggin 
1422db64fe02SNick Piggin 	va = find_vmap_area((unsigned long)addr);
1423db64fe02SNick Piggin 	if (va && va->flags & VM_VM_AREA)
1424db1aecafSMinchan Kim 		return va->vm;
142583342314SNick Piggin 
14267856dfebSAndi Kleen 	return NULL;
14277856dfebSAndi Kleen }
14287856dfebSAndi Kleen 
14291da177e4SLinus Torvalds /**
1430183ff22bSSimon Arlott  * remove_vm_area  -  find and remove a contiguous kernel virtual area
14311da177e4SLinus Torvalds  * @addr:	 base address
14321da177e4SLinus Torvalds  *
14331da177e4SLinus Torvalds  * Search for the kernel VM area starting at @addr, and remove it.
14341da177e4SLinus Torvalds  * This function returns the found VM area, but using it is NOT safe
14357856dfebSAndi Kleen  * on SMP machines, except for its size or flags.
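 *
 * A rough teardown sketch (illustrative only; 'area' is a descriptor
 * previously returned by get_vm_area(), and the NULL case is ignored):
 *
 *	struct vm_struct *vm = remove_vm_area(area->addr);
 *
 *	... the range is now unmapped; the descriptor is ours to free ...
 *	kfree(vm);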
14361da177e4SLinus Torvalds */ 1437b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 14381da177e4SLinus Torvalds { 1439db64fe02SNick Piggin struct vmap_area *va; 1440db64fe02SNick Piggin 14415803ed29SChristoph Hellwig might_sleep(); 14425803ed29SChristoph Hellwig 1443db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 1444db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) { 1445db1aecafSMinchan Kim struct vm_struct *vm = va->vm; 1446f5252e00SMitsuo Hayasaka 1447c69480adSJoonsoo Kim spin_lock(&vmap_area_lock); 1448c69480adSJoonsoo Kim va->vm = NULL; 1449c69480adSJoonsoo Kim va->flags &= ~VM_VM_AREA; 1450c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 1451c69480adSJoonsoo Kim 1452dd32c279SKAMEZAWA Hiroyuki vmap_debug_free_range(va->va_start, va->va_end); 1453a5af5aa8SAndrey Ryabinin kasan_free_shadow(vm); 1454dd32c279SKAMEZAWA Hiroyuki free_unmap_vmap_area(va); 1455dd32c279SKAMEZAWA Hiroyuki 1456db64fe02SNick Piggin return vm; 1457db64fe02SNick Piggin } 1458db64fe02SNick Piggin return NULL; 14591da177e4SLinus Torvalds } 14601da177e4SLinus Torvalds 1461b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 14621da177e4SLinus Torvalds { 14631da177e4SLinus Torvalds struct vm_struct *area; 14641da177e4SLinus Torvalds 14651da177e4SLinus Torvalds if (!addr) 14661da177e4SLinus Torvalds return; 14671da177e4SLinus Torvalds 1468e69e9d4aSHATAYAMA Daisuke if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 1469ab15d9b4SDan Carpenter addr)) 14701da177e4SLinus Torvalds return; 14711da177e4SLinus Torvalds 14721da177e4SLinus Torvalds area = remove_vm_area(addr); 14731da177e4SLinus Torvalds if (unlikely(!area)) { 14744c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 14751da177e4SLinus Torvalds addr); 14761da177e4SLinus Torvalds return; 14771da177e4SLinus Torvalds } 14781da177e4SLinus Torvalds 14797511c3edSJerome Marchand debug_check_no_locks_freed(addr, get_vm_area_size(area)); 14807511c3edSJerome Marchand debug_check_no_obj_freed(addr, get_vm_area_size(area)); 14819a11b49aSIngo Molnar 14821da177e4SLinus Torvalds if (deallocate_pages) { 14831da177e4SLinus Torvalds int i; 14841da177e4SLinus Torvalds 14851da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1486bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 1487bf53d6f8SChristoph Lameter 1488bf53d6f8SChristoph Lameter BUG_ON(!page); 14894949148aSVladimir Davydov __free_pages(page, 0); 14901da177e4SLinus Torvalds } 14911da177e4SLinus Torvalds 1492244d63eeSDavid Rientjes kvfree(area->pages); 14931da177e4SLinus Torvalds } 14941da177e4SLinus Torvalds 14951da177e4SLinus Torvalds kfree(area); 14961da177e4SLinus Torvalds return; 14971da177e4SLinus Torvalds } 14981da177e4SLinus Torvalds 1499bf22e37aSAndrey Ryabinin static inline void __vfree_deferred(const void *addr) 1500bf22e37aSAndrey Ryabinin { 1501bf22e37aSAndrey Ryabinin /* 1502bf22e37aSAndrey Ryabinin * Use raw_cpu_ptr() because this can be called from preemptible 1503bf22e37aSAndrey Ryabinin * context. Preemption is absolutely fine here, because the llist_add() 1504bf22e37aSAndrey Ryabinin * implementation is lockless, so it works even if we are adding to 1505bf22e37aSAndrey Ryabinin * nother cpu's list. schedule_work() should be fine with this too. 
1506bf22e37aSAndrey Ryabinin */ 1507bf22e37aSAndrey Ryabinin struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 1508bf22e37aSAndrey Ryabinin 1509bf22e37aSAndrey Ryabinin if (llist_add((struct llist_node *)addr, &p->list)) 1510bf22e37aSAndrey Ryabinin schedule_work(&p->wq); 1511bf22e37aSAndrey Ryabinin } 1512bf22e37aSAndrey Ryabinin 1513bf22e37aSAndrey Ryabinin /** 1514bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc() 1515bf22e37aSAndrey Ryabinin * @addr: memory base address 1516bf22e37aSAndrey Ryabinin * 1517bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context 1518bf22e37aSAndrey Ryabinin * except NMIs. 1519bf22e37aSAndrey Ryabinin */ 1520bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr) 1521bf22e37aSAndrey Ryabinin { 1522bf22e37aSAndrey Ryabinin BUG_ON(in_nmi()); 1523bf22e37aSAndrey Ryabinin 1524bf22e37aSAndrey Ryabinin kmemleak_free(addr); 1525bf22e37aSAndrey Ryabinin 1526bf22e37aSAndrey Ryabinin if (!addr) 1527bf22e37aSAndrey Ryabinin return; 1528bf22e37aSAndrey Ryabinin __vfree_deferred(addr); 1529bf22e37aSAndrey Ryabinin } 1530bf22e37aSAndrey Ryabinin 15311da177e4SLinus Torvalds /** 15321da177e4SLinus Torvalds * vfree - release memory allocated by vmalloc() 15331da177e4SLinus Torvalds * @addr: memory base address 15341da177e4SLinus Torvalds * 1535183ff22bSSimon Arlott * Free the virtually continuous memory area starting at @addr, as 153680e93effSPekka Enberg * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is 153780e93effSPekka Enberg * NULL, no operation is performed. 15381da177e4SLinus Torvalds * 153932fcfd40SAl Viro * Must not be called in NMI context (strictly speaking, only if we don't 154032fcfd40SAl Viro * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 154132fcfd40SAl Viro * conventions for vfree() arch-depenedent would be a really bad idea) 154232fcfd40SAl Viro * 1543c9fcee51SAndrew Morton * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node) 15441da177e4SLinus Torvalds */ 1545b3bdda02SChristoph Lameter void vfree(const void *addr) 15461da177e4SLinus Torvalds { 154732fcfd40SAl Viro BUG_ON(in_nmi()); 154889219d37SCatalin Marinas 154989219d37SCatalin Marinas kmemleak_free(addr); 155089219d37SCatalin Marinas 155132fcfd40SAl Viro if (!addr) 155232fcfd40SAl Viro return; 1553bf22e37aSAndrey Ryabinin if (unlikely(in_interrupt())) 1554bf22e37aSAndrey Ryabinin __vfree_deferred(addr); 1555bf22e37aSAndrey Ryabinin else 15561da177e4SLinus Torvalds __vunmap(addr, 1); 15571da177e4SLinus Torvalds } 15581da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 15591da177e4SLinus Torvalds 15601da177e4SLinus Torvalds /** 15611da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 15621da177e4SLinus Torvalds * @addr: memory base address 15631da177e4SLinus Torvalds * 15641da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 15651da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 15661da177e4SLinus Torvalds * 156780e93effSPekka Enberg * Must not be called in interrupt context. 
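 *
 * A minimal round-trip sketch (illustrative only; 'pages' and 'nr' are a
 * caller-provided page array and its length):
 *
 *	void *addr = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (addr) {
 *		... use the virtually contiguous mapping ...
 *		vunmap(addr);
 *	}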
15681da177e4SLinus Torvalds */ 1569b3bdda02SChristoph Lameter void vunmap(const void *addr) 15701da177e4SLinus Torvalds { 15711da177e4SLinus Torvalds BUG_ON(in_interrupt()); 157234754b69SPeter Zijlstra might_sleep(); 157332fcfd40SAl Viro if (addr) 15741da177e4SLinus Torvalds __vunmap(addr, 0); 15751da177e4SLinus Torvalds } 15761da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 15771da177e4SLinus Torvalds 15781da177e4SLinus Torvalds /** 15791da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 15801da177e4SLinus Torvalds * @pages: array of page pointers 15811da177e4SLinus Torvalds * @count: number of pages to map 15821da177e4SLinus Torvalds * @flags: vm_area->flags 15831da177e4SLinus Torvalds * @prot: page protection for the mapping 15841da177e4SLinus Torvalds * 15851da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 15861da177e4SLinus Torvalds * space. 15871da177e4SLinus Torvalds */ 15881da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 15891da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 15901da177e4SLinus Torvalds { 15911da177e4SLinus Torvalds struct vm_struct *area; 159265ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */ 15931da177e4SLinus Torvalds 159434754b69SPeter Zijlstra might_sleep(); 159534754b69SPeter Zijlstra 15964481374cSJan Beulich if (count > totalram_pages) 15971da177e4SLinus Torvalds return NULL; 15981da177e4SLinus Torvalds 159965ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT; 160065ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 16011da177e4SLinus Torvalds if (!area) 16021da177e4SLinus Torvalds return NULL; 160323016969SChristoph Lameter 1604f6f8ed47SWANG Chao if (map_vm_area(area, prot, pages)) { 16051da177e4SLinus Torvalds vunmap(area->addr); 16061da177e4SLinus Torvalds return NULL; 16071da177e4SLinus Torvalds } 16081da177e4SLinus Torvalds 16091da177e4SLinus Torvalds return area->addr; 16101da177e4SLinus Torvalds } 16111da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 16121da177e4SLinus Torvalds 16132dca6999SDavid Miller static void *__vmalloc_node(unsigned long size, unsigned long align, 16142dca6999SDavid Miller gfp_t gfp_mask, pgprot_t prot, 16155e6cafc8SMarek Szyprowski int node, const void *caller); 1616e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 16173722e13cSWanpeng Li pgprot_t prot, int node) 16181da177e4SLinus Torvalds { 16191da177e4SLinus Torvalds struct page **pages; 16201da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 1621930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 1622930f036bSDavid Rientjes const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; 16231da177e4SLinus Torvalds 1624762216abSWanpeng Li nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; 16251da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 16261da177e4SLinus Torvalds 16271da177e4SLinus Torvalds area->nr_pages = nr_pages; 16281da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
*/ 16298757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 1630976d6dfbSJan Beulich pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM, 16313722e13cSWanpeng Li PAGE_KERNEL, node, area->caller); 1632286e1ea3SAndrew Morton } else { 1633976d6dfbSJan Beulich pages = kmalloc_node(array_size, nested_gfp, node); 1634286e1ea3SAndrew Morton } 16351da177e4SLinus Torvalds area->pages = pages; 16361da177e4SLinus Torvalds if (!area->pages) { 16371da177e4SLinus Torvalds remove_vm_area(area->addr); 16381da177e4SLinus Torvalds kfree(area); 16391da177e4SLinus Torvalds return NULL; 16401da177e4SLinus Torvalds } 16411da177e4SLinus Torvalds 16421da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1643bf53d6f8SChristoph Lameter struct page *page; 1644bf53d6f8SChristoph Lameter 16454b90951cSJianguo Wu if (node == NUMA_NO_NODE) 16467877cdccSMichal Hocko page = alloc_page(alloc_mask); 1647930fc45aSChristoph Lameter else 16487877cdccSMichal Hocko page = alloc_pages_node(node, alloc_mask, 0); 1649bf53d6f8SChristoph Lameter 1650bf53d6f8SChristoph Lameter if (unlikely(!page)) { 16511da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 16521da177e4SLinus Torvalds area->nr_pages = i; 16531da177e4SLinus Torvalds goto fail; 16541da177e4SLinus Torvalds } 1655bf53d6f8SChristoph Lameter area->pages[i] = page; 1656d0164adcSMel Gorman if (gfpflags_allow_blocking(gfp_mask)) 1657660654f9SEric Dumazet cond_resched(); 16581da177e4SLinus Torvalds } 16591da177e4SLinus Torvalds 1660f6f8ed47SWANG Chao if (map_vm_area(area, prot, pages)) 16611da177e4SLinus Torvalds goto fail; 16621da177e4SLinus Torvalds return area->addr; 16631da177e4SLinus Torvalds 16641da177e4SLinus Torvalds fail: 1665*a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 16667877cdccSMichal Hocko "vmalloc: allocation failure, allocated %ld of %ld bytes", 166722943ab1SDave Hansen (area->nr_pages*PAGE_SIZE), area->size); 16681da177e4SLinus Torvalds vfree(area->addr); 16691da177e4SLinus Torvalds return NULL; 16701da177e4SLinus Torvalds } 16711da177e4SLinus Torvalds 1672d0a21265SDavid Rientjes /** 1673d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 1674d0a21265SDavid Rientjes * @size: allocation size 1675d0a21265SDavid Rientjes * @align: desired alignment 1676d0a21265SDavid Rientjes * @start: vm area range start 1677d0a21265SDavid Rientjes * @end: vm area range end 1678d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 1679d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 1680cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 168100ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 1682d0a21265SDavid Rientjes * @caller: caller's return address 1683d0a21265SDavid Rientjes * 1684d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 1685d0a21265SDavid Rientjes * allocator with @gfp_mask flags. Map them into contiguous 1686d0a21265SDavid Rientjes * kernel virtual space, using a pagetable protection of @prot. 
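 *
 * A hedged sketch of a caller that needs a dedicated range (illustrative
 * only; MY_AREA_START/MY_AREA_END are made-up bounds, not constants defined
 * in this file):
 *
 *	void *p = __vmalloc_node_range(size, 1, MY_AREA_START, MY_AREA_END,
 *				       GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 *				       0, NUMA_NO_NODE,
 *				       __builtin_return_address(0));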
1687d0a21265SDavid Rientjes */ 1688d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 1689d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 1690cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node, 1691cb9e3c29SAndrey Ryabinin const void *caller) 1692930fc45aSChristoph Lameter { 1693d0a21265SDavid Rientjes struct vm_struct *area; 1694d0a21265SDavid Rientjes void *addr; 1695d0a21265SDavid Rientjes unsigned long real_size = size; 1696d0a21265SDavid Rientjes 1697d0a21265SDavid Rientjes size = PAGE_ALIGN(size); 1698d0a21265SDavid Rientjes if (!size || (size >> PAGE_SHIFT) > totalram_pages) 1699de7d2b56SJoe Perches goto fail; 1700d0a21265SDavid Rientjes 1701cb9e3c29SAndrey Ryabinin area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | 1702cb9e3c29SAndrey Ryabinin vm_flags, start, end, node, gfp_mask, caller); 1703d0a21265SDavid Rientjes if (!area) 1704de7d2b56SJoe Perches goto fail; 1705d0a21265SDavid Rientjes 17063722e13cSWanpeng Li addr = __vmalloc_area_node(area, gfp_mask, prot, node); 17071368edf0SMel Gorman if (!addr) 1708b82225f3SWanpeng Li return NULL; 170989219d37SCatalin Marinas 171089219d37SCatalin Marinas /* 171120fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED 171220fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized. 17134341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here. 1714f5252e00SMitsuo Hayasaka */ 171520fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area); 1716f5252e00SMitsuo Hayasaka 1717f5252e00SMitsuo Hayasaka /* 17187f88f88fSCatalin Marinas * A ref_count = 2 is needed because vm_struct allocated in 17197f88f88fSCatalin Marinas * __get_vm_area_node() contains a reference to the virtual address of 17207f88f88fSCatalin Marinas * the vmalloc'ed block. 172189219d37SCatalin Marinas */ 17227f88f88fSCatalin Marinas kmemleak_alloc(addr, real_size, 2, gfp_mask); 172389219d37SCatalin Marinas 172489219d37SCatalin Marinas return addr; 1725de7d2b56SJoe Perches 1726de7d2b56SJoe Perches fail: 1727*a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 17287877cdccSMichal Hocko "vmalloc: allocation failure: %lu bytes", real_size); 1729de7d2b56SJoe Perches return NULL; 1730930fc45aSChristoph Lameter } 1731930fc45aSChristoph Lameter 17321da177e4SLinus Torvalds /** 1733930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 17341da177e4SLinus Torvalds * @size: allocation size 17352dca6999SDavid Miller * @align: desired alignment 17361da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 17371da177e4SLinus Torvalds * @prot: protection mask for the allocated pages 173800ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 1739c85d194bSRandy Dunlap * @caller: caller's return address 17401da177e4SLinus Torvalds * 17411da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 17421da177e4SLinus Torvalds * allocator with @gfp_mask flags. Map them into contiguous 17431da177e4SLinus Torvalds * kernel virtual space, using a pagetable protection of @prot. 
17441da177e4SLinus Torvalds */ 17452dca6999SDavid Miller static void *__vmalloc_node(unsigned long size, unsigned long align, 17462dca6999SDavid Miller gfp_t gfp_mask, pgprot_t prot, 17475e6cafc8SMarek Szyprowski int node, const void *caller) 17481da177e4SLinus Torvalds { 1749d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 1750cb9e3c29SAndrey Ryabinin gfp_mask, prot, 0, node, caller); 17511da177e4SLinus Torvalds } 17521da177e4SLinus Torvalds 1753930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 1754930fc45aSChristoph Lameter { 175500ef2d2fSDavid Rientjes return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, 175623016969SChristoph Lameter __builtin_return_address(0)); 1757930fc45aSChristoph Lameter } 17581da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 17591da177e4SLinus Torvalds 1760e1ca7788SDave Young static inline void *__vmalloc_node_flags(unsigned long size, 1761e1ca7788SDave Young int node, gfp_t flags) 1762e1ca7788SDave Young { 1763e1ca7788SDave Young return __vmalloc_node(size, 1, flags, PAGE_KERNEL, 1764e1ca7788SDave Young node, __builtin_return_address(0)); 1765e1ca7788SDave Young } 1766e1ca7788SDave Young 17671da177e4SLinus Torvalds /** 17681da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 17691da177e4SLinus Torvalds * @size: allocation size 17701da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 17711da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 17721da177e4SLinus Torvalds * 1773c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 17741da177e4SLinus Torvalds * use __vmalloc() instead. 17751da177e4SLinus Torvalds */ 17761da177e4SLinus Torvalds void *vmalloc(unsigned long size) 17771da177e4SLinus Torvalds { 177800ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 177900ef2d2fSDavid Rientjes GFP_KERNEL | __GFP_HIGHMEM); 17801da177e4SLinus Torvalds } 17811da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 17821da177e4SLinus Torvalds 1783930fc45aSChristoph Lameter /** 1784e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 1785e1ca7788SDave Young * @size: allocation size 1786e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 1787e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 1788e1ca7788SDave Young * The memory allocated is set to zero. 1789e1ca7788SDave Young * 1790e1ca7788SDave Young * For tight control over page level allocator and protection flags 1791e1ca7788SDave Young * use __vmalloc() instead. 1792e1ca7788SDave Young */ 1793e1ca7788SDave Young void *vzalloc(unsigned long size) 1794e1ca7788SDave Young { 179500ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 1796e1ca7788SDave Young GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 1797e1ca7788SDave Young } 1798e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 1799e1ca7788SDave Young 1800e1ca7788SDave Young /** 1801ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 180283342314SNick Piggin * @size: allocation size 1803ead04089SRolf Eike Beer * 1804ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 1805ead04089SRolf Eike Beer * without leaking data. 
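 *
 * A rough sketch of the usual pairing with remap_vmalloc_range() below
 * (illustrative only; 'buf' and 'len' are made-up driver state, and error
 * handling is omitted):
 *
 *	... at allocation time ...
 *	buf = vmalloc_user(len);
 *
 *	... in the driver's mmap handler ...
 *	return remap_vmalloc_range(vma, buf, 0);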
180683342314SNick Piggin */ 180783342314SNick Piggin void *vmalloc_user(unsigned long size) 180883342314SNick Piggin { 180983342314SNick Piggin struct vm_struct *area; 181083342314SNick Piggin void *ret; 181183342314SNick Piggin 18122dca6999SDavid Miller ret = __vmalloc_node(size, SHMLBA, 18132dca6999SDavid Miller GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 181400ef2d2fSDavid Rientjes PAGE_KERNEL, NUMA_NO_NODE, 181500ef2d2fSDavid Rientjes __builtin_return_address(0)); 18162b4ac44eSEric Dumazet if (ret) { 1817db64fe02SNick Piggin area = find_vm_area(ret); 181883342314SNick Piggin area->flags |= VM_USERMAP; 18192b4ac44eSEric Dumazet } 182083342314SNick Piggin return ret; 182183342314SNick Piggin } 182283342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 182383342314SNick Piggin 182483342314SNick Piggin /** 1825930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 1826930fc45aSChristoph Lameter * @size: allocation size 1827d44e0780SRandy Dunlap * @node: numa node 1828930fc45aSChristoph Lameter * 1829930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 1830930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 1831930fc45aSChristoph Lameter * 1832c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 1833930fc45aSChristoph Lameter * use __vmalloc() instead. 1834930fc45aSChristoph Lameter */ 1835930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 1836930fc45aSChristoph Lameter { 18372dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 183823016969SChristoph Lameter node, __builtin_return_address(0)); 1839930fc45aSChristoph Lameter } 1840930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 1841930fc45aSChristoph Lameter 1842e1ca7788SDave Young /** 1843e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 1844e1ca7788SDave Young * @size: allocation size 1845e1ca7788SDave Young * @node: numa node 1846e1ca7788SDave Young * 1847e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 1848e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 1849e1ca7788SDave Young * The memory allocated is set to zero. 1850e1ca7788SDave Young * 1851e1ca7788SDave Young * For tight control over page level allocator and protection flags 1852e1ca7788SDave Young * use __vmalloc_node() instead. 1853e1ca7788SDave Young */ 1854e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 1855e1ca7788SDave Young { 1856e1ca7788SDave Young return __vmalloc_node_flags(size, node, 1857e1ca7788SDave Young GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 1858e1ca7788SDave Young } 1859e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 1860e1ca7788SDave Young 18614dc3b16bSPavel Pisa #ifndef PAGE_KERNEL_EXEC 18624dc3b16bSPavel Pisa # define PAGE_KERNEL_EXEC PAGE_KERNEL 18634dc3b16bSPavel Pisa #endif 18644dc3b16bSPavel Pisa 18651da177e4SLinus Torvalds /** 18661da177e4SLinus Torvalds * vmalloc_exec - allocate virtually contiguous, executable memory 18671da177e4SLinus Torvalds * @size: allocation size 18681da177e4SLinus Torvalds * 18691da177e4SLinus Torvalds * Kernel-internal function to allocate enough pages to cover @size 18701da177e4SLinus Torvalds * the page level allocator and map them into contiguous and 18711da177e4SLinus Torvalds * executable kernel virtual space. 
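 *
 * A minimal sketch (illustrative only; 'insns' and 'len' are made-up, and
 * any instruction-cache maintenance the architecture needs after writing
 * the code is the caller's responsibility):
 *
 *	void *code = vmalloc_exec(len);
 *
 *	if (code)
 *		memcpy(code, insns, len);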
18721da177e4SLinus Torvalds * 1873c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 18741da177e4SLinus Torvalds * use __vmalloc() instead. 18751da177e4SLinus Torvalds */ 18761da177e4SLinus Torvalds 18771da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size) 18781da177e4SLinus Torvalds { 18792dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, 188000ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 18811da177e4SLinus Torvalds } 18821da177e4SLinus Torvalds 18830d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 18847ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL 18850d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 18867ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL 18870d08e0d3SAndi Kleen #else 18880d08e0d3SAndi Kleen #define GFP_VMALLOC32 GFP_KERNEL 18890d08e0d3SAndi Kleen #endif 18900d08e0d3SAndi Kleen 18911da177e4SLinus Torvalds /** 18921da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 18931da177e4SLinus Torvalds * @size: allocation size 18941da177e4SLinus Torvalds * 18951da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 18961da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 18971da177e4SLinus Torvalds */ 18981da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 18991da177e4SLinus Torvalds { 19002dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, 190100ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 19021da177e4SLinus Torvalds } 19031da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 19041da177e4SLinus Torvalds 190583342314SNick Piggin /** 1906ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 190783342314SNick Piggin * @size: allocation size 1908ead04089SRolf Eike Beer * 1909ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 1910ead04089SRolf Eike Beer * mapped to userspace without leaking data. 191183342314SNick Piggin */ 191283342314SNick Piggin void *vmalloc_32_user(unsigned long size) 191383342314SNick Piggin { 191483342314SNick Piggin struct vm_struct *area; 191583342314SNick Piggin void *ret; 191683342314SNick Piggin 19172dca6999SDavid Miller ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 191800ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 19192b4ac44eSEric Dumazet if (ret) { 1920db64fe02SNick Piggin area = find_vm_area(ret); 192183342314SNick Piggin area->flags |= VM_USERMAP; 19222b4ac44eSEric Dumazet } 192383342314SNick Piggin return ret; 192483342314SNick Piggin } 192583342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 192683342314SNick Piggin 1927d0107eb0SKAMEZAWA Hiroyuki /* 1928d0107eb0SKAMEZAWA Hiroyuki * small helper routine , copy contents to buf from addr. 1929d0107eb0SKAMEZAWA Hiroyuki * If the page is not present, fill zero. 
1930d0107eb0SKAMEZAWA Hiroyuki */ 1931d0107eb0SKAMEZAWA Hiroyuki 1932d0107eb0SKAMEZAWA Hiroyuki static int aligned_vread(char *buf, char *addr, unsigned long count) 1933d0107eb0SKAMEZAWA Hiroyuki { 1934d0107eb0SKAMEZAWA Hiroyuki struct page *p; 1935d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 1936d0107eb0SKAMEZAWA Hiroyuki 1937d0107eb0SKAMEZAWA Hiroyuki while (count) { 1938d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 1939d0107eb0SKAMEZAWA Hiroyuki 1940891c49abSAlexander Kuleshov offset = offset_in_page(addr); 1941d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 1942d0107eb0SKAMEZAWA Hiroyuki if (length > count) 1943d0107eb0SKAMEZAWA Hiroyuki length = count; 1944d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 1945d0107eb0SKAMEZAWA Hiroyuki /* 1946d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need 1947d0107eb0SKAMEZAWA Hiroyuki * lock. But adding lock here means that we need to add 1948d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calles for this _debug_ 1949d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 1950d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function. 1951d0107eb0SKAMEZAWA Hiroyuki */ 1952d0107eb0SKAMEZAWA Hiroyuki if (p) { 1953d0107eb0SKAMEZAWA Hiroyuki /* 1954d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's 1955d0107eb0SKAMEZAWA Hiroyuki * function description) 1956d0107eb0SKAMEZAWA Hiroyuki */ 19579b04c5feSCong Wang void *map = kmap_atomic(p); 1958d0107eb0SKAMEZAWA Hiroyuki memcpy(buf, map + offset, length); 19599b04c5feSCong Wang kunmap_atomic(map); 1960d0107eb0SKAMEZAWA Hiroyuki } else 1961d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, length); 1962d0107eb0SKAMEZAWA Hiroyuki 1963d0107eb0SKAMEZAWA Hiroyuki addr += length; 1964d0107eb0SKAMEZAWA Hiroyuki buf += length; 1965d0107eb0SKAMEZAWA Hiroyuki copied += length; 1966d0107eb0SKAMEZAWA Hiroyuki count -= length; 1967d0107eb0SKAMEZAWA Hiroyuki } 1968d0107eb0SKAMEZAWA Hiroyuki return copied; 1969d0107eb0SKAMEZAWA Hiroyuki } 1970d0107eb0SKAMEZAWA Hiroyuki 1971d0107eb0SKAMEZAWA Hiroyuki static int aligned_vwrite(char *buf, char *addr, unsigned long count) 1972d0107eb0SKAMEZAWA Hiroyuki { 1973d0107eb0SKAMEZAWA Hiroyuki struct page *p; 1974d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 1975d0107eb0SKAMEZAWA Hiroyuki 1976d0107eb0SKAMEZAWA Hiroyuki while (count) { 1977d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 1978d0107eb0SKAMEZAWA Hiroyuki 1979891c49abSAlexander Kuleshov offset = offset_in_page(addr); 1980d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 1981d0107eb0SKAMEZAWA Hiroyuki if (length > count) 1982d0107eb0SKAMEZAWA Hiroyuki length = count; 1983d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 1984d0107eb0SKAMEZAWA Hiroyuki /* 1985d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need 1986d0107eb0SKAMEZAWA Hiroyuki * lock. But adding lock here means that we need to add 1987d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calles for this _debug_ 1988d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 1989d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function. 
1990d0107eb0SKAMEZAWA Hiroyuki 		 */
1991d0107eb0SKAMEZAWA Hiroyuki 		if (p) {
1992d0107eb0SKAMEZAWA Hiroyuki 			/*
1993d0107eb0SKAMEZAWA Hiroyuki 			 * we can expect USER0 is not used (see vread/vwrite's
1994d0107eb0SKAMEZAWA Hiroyuki 			 * function description)
1995d0107eb0SKAMEZAWA Hiroyuki 			 */
19969b04c5feSCong Wang 			void *map = kmap_atomic(p);
1997d0107eb0SKAMEZAWA Hiroyuki 			memcpy(map + offset, buf, length);
19989b04c5feSCong Wang 			kunmap_atomic(map);
1999d0107eb0SKAMEZAWA Hiroyuki 		}
2000d0107eb0SKAMEZAWA Hiroyuki 		addr += length;
2001d0107eb0SKAMEZAWA Hiroyuki 		buf += length;
2002d0107eb0SKAMEZAWA Hiroyuki 		copied += length;
2003d0107eb0SKAMEZAWA Hiroyuki 		count -= length;
2004d0107eb0SKAMEZAWA Hiroyuki 	}
2005d0107eb0SKAMEZAWA Hiroyuki 	return copied;
2006d0107eb0SKAMEZAWA Hiroyuki }
2007d0107eb0SKAMEZAWA Hiroyuki 
2008d0107eb0SKAMEZAWA Hiroyuki /**
2009d0107eb0SKAMEZAWA Hiroyuki  * vread() - read vmalloc area in a safe way.
2010d0107eb0SKAMEZAWA Hiroyuki  * @buf:	buffer for reading data
2011d0107eb0SKAMEZAWA Hiroyuki  * @addr:	vm address.
2012d0107eb0SKAMEZAWA Hiroyuki  * @count:	number of bytes to be read.
2013d0107eb0SKAMEZAWA Hiroyuki  *
2014d0107eb0SKAMEZAWA Hiroyuki  * Returns the number of bytes by which addr and buf should be increased
2015d0107eb0SKAMEZAWA Hiroyuki  * (same number as @count).  Returns 0 if [addr...addr+count) doesn't
2016d0107eb0SKAMEZAWA Hiroyuki  * include any intersection with a live vmalloc area.
2017d0107eb0SKAMEZAWA Hiroyuki  *
2018d0107eb0SKAMEZAWA Hiroyuki  * This function checks that addr is a valid vmalloc'ed area, and
2019d0107eb0SKAMEZAWA Hiroyuki  * copies data from that area to a given buffer. If the given memory range
2020d0107eb0SKAMEZAWA Hiroyuki  * of [addr...addr+count) includes some valid address, data is copied to
2021d0107eb0SKAMEZAWA Hiroyuki  * the proper area of @buf. If there are memory holes, they'll be zero-filled.
2022d0107eb0SKAMEZAWA Hiroyuki  * An IOREMAP area is treated as a memory hole and no copy is done.
2023d0107eb0SKAMEZAWA Hiroyuki  *
2024d0107eb0SKAMEZAWA Hiroyuki  * If [addr...addr+count) doesn't intersect any live
2025a8e5202dSCong Wang  * vm_struct area, returns 0.  @buf should be a kernel buffer.
2026d0107eb0SKAMEZAWA Hiroyuki  *
2027d0107eb0SKAMEZAWA Hiroyuki  * Note: In usual ops, vread() is never necessary because the caller
2028d0107eb0SKAMEZAWA Hiroyuki  * should know that the vmalloc() area is valid and can use memcpy().
2029d0107eb0SKAMEZAWA Hiroyuki  * This is for routines which have to access the vmalloc area without
2030d0107eb0SKAMEZAWA Hiroyuki  * any information, such as /dev/kmem.
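 *
 * A minimal sketch of the /dev/kmem style of use (illustrative only; 'kbuf'
 * is a made-up kernel buffer of at least @count bytes):
 *
 *	long n = vread(kbuf, addr, count);
 *
 *	if (!n)
 *		... no live vmalloc area intersected [addr...addr+count) ...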
2031d0107eb0SKAMEZAWA Hiroyuki * 2032d0107eb0SKAMEZAWA Hiroyuki */ 2033d0107eb0SKAMEZAWA Hiroyuki 20341da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count) 20351da177e4SLinus Torvalds { 2036e81ce85fSJoonsoo Kim struct vmap_area *va; 2037e81ce85fSJoonsoo Kim struct vm_struct *vm; 20381da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 2039d0107eb0SKAMEZAWA Hiroyuki unsigned long buflen = count; 20401da177e4SLinus Torvalds unsigned long n; 20411da177e4SLinus Torvalds 20421da177e4SLinus Torvalds /* Don't allow overflow */ 20431da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 20441da177e4SLinus Torvalds count = -(unsigned long) addr; 20451da177e4SLinus Torvalds 2046e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock); 2047e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) { 2048e81ce85fSJoonsoo Kim if (!count) 2049e81ce85fSJoonsoo Kim break; 2050e81ce85fSJoonsoo Kim 2051e81ce85fSJoonsoo Kim if (!(va->flags & VM_VM_AREA)) 2052e81ce85fSJoonsoo Kim continue; 2053e81ce85fSJoonsoo Kim 2054e81ce85fSJoonsoo Kim vm = va->vm; 2055e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr; 2056762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm)) 20571da177e4SLinus Torvalds continue; 20581da177e4SLinus Torvalds while (addr < vaddr) { 20591da177e4SLinus Torvalds if (count == 0) 20601da177e4SLinus Torvalds goto finished; 20611da177e4SLinus Torvalds *buf = '\0'; 20621da177e4SLinus Torvalds buf++; 20631da177e4SLinus Torvalds addr++; 20641da177e4SLinus Torvalds count--; 20651da177e4SLinus Torvalds } 2066762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr; 2067d0107eb0SKAMEZAWA Hiroyuki if (n > count) 2068d0107eb0SKAMEZAWA Hiroyuki n = count; 2069e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) 2070d0107eb0SKAMEZAWA Hiroyuki aligned_vread(buf, addr, n); 2071d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */ 2072d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, n); 2073d0107eb0SKAMEZAWA Hiroyuki buf += n; 2074d0107eb0SKAMEZAWA Hiroyuki addr += n; 2075d0107eb0SKAMEZAWA Hiroyuki count -= n; 20761da177e4SLinus Torvalds } 20771da177e4SLinus Torvalds finished: 2078e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 2079d0107eb0SKAMEZAWA Hiroyuki 2080d0107eb0SKAMEZAWA Hiroyuki if (buf == buf_start) 2081d0107eb0SKAMEZAWA Hiroyuki return 0; 2082d0107eb0SKAMEZAWA Hiroyuki /* zero-fill memory holes */ 2083d0107eb0SKAMEZAWA Hiroyuki if (buf != buf_start + buflen) 2084d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, buflen - (buf - buf_start)); 2085d0107eb0SKAMEZAWA Hiroyuki 2086d0107eb0SKAMEZAWA Hiroyuki return buflen; 20871da177e4SLinus Torvalds } 20881da177e4SLinus Torvalds 2089d0107eb0SKAMEZAWA Hiroyuki /** 2090d0107eb0SKAMEZAWA Hiroyuki * vwrite() - write vmalloc area in a safe way. 2091d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for source data 2092d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 2093d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read. 2094d0107eb0SKAMEZAWA Hiroyuki * 2095d0107eb0SKAMEZAWA Hiroyuki * Returns # of bytes which addr and buf should be incresed. 2096d0107eb0SKAMEZAWA Hiroyuki * (same number to @count). 2097d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't includes any intersect with valid 2098d0107eb0SKAMEZAWA Hiroyuki * vmalloc area, returns 0. 2099d0107eb0SKAMEZAWA Hiroyuki * 2100d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 2101d0107eb0SKAMEZAWA Hiroyuki * copy data from a buffer to the given addr. 
If specified range of 2102d0107eb0SKAMEZAWA Hiroyuki * [addr...addr+count) includes some valid address, data is copied from 2103d0107eb0SKAMEZAWA Hiroyuki * proper area of @buf. If there are memory holes, no copy to hole. 2104d0107eb0SKAMEZAWA Hiroyuki * IOREMAP area is treated as memory hole and no copy is done. 2105d0107eb0SKAMEZAWA Hiroyuki * 2106d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't includes any intersects with alive 2107a8e5202dSCong Wang * vm_struct area, returns 0. @buf should be kernel's buffer. 2108d0107eb0SKAMEZAWA Hiroyuki * 2109d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vwrite() is never necessary because the caller 2110d0107eb0SKAMEZAWA Hiroyuki * should know vmalloc() area is valid and can use memcpy(). 2111d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access vmalloc area without 2112d0107eb0SKAMEZAWA Hiroyuki * any informaion, as /dev/kmem. 2113d0107eb0SKAMEZAWA Hiroyuki */ 2114d0107eb0SKAMEZAWA Hiroyuki 21151da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count) 21161da177e4SLinus Torvalds { 2117e81ce85fSJoonsoo Kim struct vmap_area *va; 2118e81ce85fSJoonsoo Kim struct vm_struct *vm; 2119d0107eb0SKAMEZAWA Hiroyuki char *vaddr; 2120d0107eb0SKAMEZAWA Hiroyuki unsigned long n, buflen; 2121d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 21221da177e4SLinus Torvalds 21231da177e4SLinus Torvalds /* Don't allow overflow */ 21241da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 21251da177e4SLinus Torvalds count = -(unsigned long) addr; 2126d0107eb0SKAMEZAWA Hiroyuki buflen = count; 21271da177e4SLinus Torvalds 2128e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock); 2129e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) { 2130e81ce85fSJoonsoo Kim if (!count) 2131e81ce85fSJoonsoo Kim break; 2132e81ce85fSJoonsoo Kim 2133e81ce85fSJoonsoo Kim if (!(va->flags & VM_VM_AREA)) 2134e81ce85fSJoonsoo Kim continue; 2135e81ce85fSJoonsoo Kim 2136e81ce85fSJoonsoo Kim vm = va->vm; 2137e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr; 2138762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm)) 21391da177e4SLinus Torvalds continue; 21401da177e4SLinus Torvalds while (addr < vaddr) { 21411da177e4SLinus Torvalds if (count == 0) 21421da177e4SLinus Torvalds goto finished; 21431da177e4SLinus Torvalds buf++; 21441da177e4SLinus Torvalds addr++; 21451da177e4SLinus Torvalds count--; 21461da177e4SLinus Torvalds } 2147762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr; 2148d0107eb0SKAMEZAWA Hiroyuki if (n > count) 2149d0107eb0SKAMEZAWA Hiroyuki n = count; 2150e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) { 2151d0107eb0SKAMEZAWA Hiroyuki aligned_vwrite(buf, addr, n); 2152d0107eb0SKAMEZAWA Hiroyuki copied++; 2153d0107eb0SKAMEZAWA Hiroyuki } 2154d0107eb0SKAMEZAWA Hiroyuki buf += n; 2155d0107eb0SKAMEZAWA Hiroyuki addr += n; 2156d0107eb0SKAMEZAWA Hiroyuki count -= n; 21571da177e4SLinus Torvalds } 21581da177e4SLinus Torvalds finished: 2159e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 2160d0107eb0SKAMEZAWA Hiroyuki if (!copied) 2161d0107eb0SKAMEZAWA Hiroyuki return 0; 2162d0107eb0SKAMEZAWA Hiroyuki return buflen; 21631da177e4SLinus Torvalds } 216483342314SNick Piggin 216583342314SNick Piggin /** 2166e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace 2167e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover 2168e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at 2169e69e9d4aSHATAYAMA Daisuke * @kaddr: virtual address of vmalloc kernel memory 
2170e69e9d4aSHATAYAMA Daisuke * @size: size of map area 2171e69e9d4aSHATAYAMA Daisuke * 2172e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure 2173e69e9d4aSHATAYAMA Daisuke * 2174e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area, 2175e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at 2176e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if that criteria isn't 2177e69e9d4aSHATAYAMA Daisuke * met. 2178e69e9d4aSHATAYAMA Daisuke * 2179e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c) 2180e69e9d4aSHATAYAMA Daisuke */ 2181e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 2182e69e9d4aSHATAYAMA Daisuke void *kaddr, unsigned long size) 2183e69e9d4aSHATAYAMA Daisuke { 2184e69e9d4aSHATAYAMA Daisuke struct vm_struct *area; 2185e69e9d4aSHATAYAMA Daisuke 2186e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size); 2187e69e9d4aSHATAYAMA Daisuke 2188e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 2189e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2190e69e9d4aSHATAYAMA Daisuke 2191e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr); 2192e69e9d4aSHATAYAMA Daisuke if (!area) 2193e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2194e69e9d4aSHATAYAMA Daisuke 2195e69e9d4aSHATAYAMA Daisuke if (!(area->flags & VM_USERMAP)) 2196e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2197e69e9d4aSHATAYAMA Daisuke 2198e69e9d4aSHATAYAMA Daisuke if (kaddr + size > area->addr + area->size) 2199e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2200e69e9d4aSHATAYAMA Daisuke 2201e69e9d4aSHATAYAMA Daisuke do { 2202e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr); 2203e69e9d4aSHATAYAMA Daisuke int ret; 2204e69e9d4aSHATAYAMA Daisuke 2205e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page); 2206e69e9d4aSHATAYAMA Daisuke if (ret) 2207e69e9d4aSHATAYAMA Daisuke return ret; 2208e69e9d4aSHATAYAMA Daisuke 2209e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE; 2210e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE; 2211e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE; 2212e69e9d4aSHATAYAMA Daisuke } while (size > 0); 2213e69e9d4aSHATAYAMA Daisuke 2214e69e9d4aSHATAYAMA Daisuke vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 2215e69e9d4aSHATAYAMA Daisuke 2216e69e9d4aSHATAYAMA Daisuke return 0; 2217e69e9d4aSHATAYAMA Daisuke } 2218e69e9d4aSHATAYAMA Daisuke EXPORT_SYMBOL(remap_vmalloc_range_partial); 2219e69e9d4aSHATAYAMA Daisuke 2220e69e9d4aSHATAYAMA Daisuke /** 222183342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 222283342314SNick Piggin * @vma: vma to cover (map full range of vma) 222383342314SNick Piggin * @addr: vmalloc memory 222483342314SNick Piggin * @pgoff: number of pages into addr before first page to map 22257682486bSRandy Dunlap * 22267682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 222783342314SNick Piggin * 222883342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 222983342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 223083342314SNick Piggin * that criteria isn't met. 223183342314SNick Piggin * 223272fd4a35SRobert P. J. 
Day * Similar to remap_pfn_range() (see mm/memory.c) 223383342314SNick Piggin */ 223483342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 223583342314SNick Piggin unsigned long pgoff) 223683342314SNick Piggin { 2237e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start, 2238e69e9d4aSHATAYAMA Daisuke addr + (pgoff << PAGE_SHIFT), 2239e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start); 224083342314SNick Piggin } 224183342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 224283342314SNick Piggin 22431eeb66a1SChristoph Hellwig /* 22441eeb66a1SChristoph Hellwig * Implement a stub for vmalloc_sync_all() if the architecture chose not to 22451eeb66a1SChristoph Hellwig * have one. 22461eeb66a1SChristoph Hellwig */ 22473b32123dSGideon Israel Dsouza void __weak vmalloc_sync_all(void) 22481eeb66a1SChristoph Hellwig { 22491eeb66a1SChristoph Hellwig } 22505f4352fbSJeremy Fitzhardinge 22515f4352fbSJeremy Fitzhardinge 22522f569afdSMartin Schwidefsky static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 22535f4352fbSJeremy Fitzhardinge { 2254cd12909cSDavid Vrabel pte_t ***p = data; 2255cd12909cSDavid Vrabel 2256cd12909cSDavid Vrabel if (p) { 2257cd12909cSDavid Vrabel *(*p) = pte; 2258cd12909cSDavid Vrabel (*p)++; 2259cd12909cSDavid Vrabel } 22605f4352fbSJeremy Fitzhardinge return 0; 22615f4352fbSJeremy Fitzhardinge } 22625f4352fbSJeremy Fitzhardinge 22635f4352fbSJeremy Fitzhardinge /** 22645f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 22655f4352fbSJeremy Fitzhardinge * @size: size of the area 2266cd12909cSDavid Vrabel * @ptes: returns the PTEs for the address space 22677682486bSRandy Dunlap * 22687682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 22695f4352fbSJeremy Fitzhardinge * 22705f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 22715f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 2272cd12909cSDavid Vrabel * are created. 2273cd12909cSDavid Vrabel * 2274cd12909cSDavid Vrabel * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 2275cd12909cSDavid Vrabel * allocated for the VM area are returned. 22765f4352fbSJeremy Fitzhardinge */ 2277cd12909cSDavid Vrabel struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) 22785f4352fbSJeremy Fitzhardinge { 22795f4352fbSJeremy Fitzhardinge struct vm_struct *area; 22805f4352fbSJeremy Fitzhardinge 228123016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 228223016969SChristoph Lameter __builtin_return_address(0)); 22835f4352fbSJeremy Fitzhardinge if (area == NULL) 22845f4352fbSJeremy Fitzhardinge return NULL; 22855f4352fbSJeremy Fitzhardinge 22865f4352fbSJeremy Fitzhardinge /* 22875f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 22885f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm. 22895f4352fbSJeremy Fitzhardinge */ 22905f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 2291cd12909cSDavid Vrabel size, f, ptes ? 
&ptes : NULL)) { 22925f4352fbSJeremy Fitzhardinge free_vm_area(area); 22935f4352fbSJeremy Fitzhardinge return NULL; 22945f4352fbSJeremy Fitzhardinge } 22955f4352fbSJeremy Fitzhardinge 22965f4352fbSJeremy Fitzhardinge return area; 22975f4352fbSJeremy Fitzhardinge } 22985f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 22995f4352fbSJeremy Fitzhardinge 23005f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 23015f4352fbSJeremy Fitzhardinge { 23025f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 23035f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 23045f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 23055f4352fbSJeremy Fitzhardinge kfree(area); 23065f4352fbSJeremy Fitzhardinge } 23075f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 2308a10aa579SChristoph Lameter 23094f8b02b4STejun Heo #ifdef CONFIG_SMP 2310ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 2311ca23e405STejun Heo { 23124583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node); 2313ca23e405STejun Heo } 2314ca23e405STejun Heo 2315ca23e405STejun Heo /** 2316ca23e405STejun Heo * pvm_find_next_prev - find the next and prev vmap_area surrounding @end 2317ca23e405STejun Heo * @end: target address 2318ca23e405STejun Heo * @pnext: out arg for the next vmap_area 2319ca23e405STejun Heo * @pprev: out arg for the previous vmap_area 2320ca23e405STejun Heo * 2321ca23e405STejun Heo * Returns: %true if either or both of next and prev are found, 2322ca23e405STejun Heo * %false if no vmap_area exists 2323ca23e405STejun Heo * 2324ca23e405STejun Heo * Find the vmap_areas whose end addresses enclose @end, i.e. if not 2325ca23e405STejun Heo * NULL, *pnext->va_end > @end and *pprev->va_end <= @end. 2326ca23e405STejun Heo */ 2327ca23e405STejun Heo static bool pvm_find_next_prev(unsigned long end, 2328ca23e405STejun Heo struct vmap_area **pnext, 2329ca23e405STejun Heo struct vmap_area **pprev) 2330ca23e405STejun Heo { 2331ca23e405STejun Heo struct rb_node *n = vmap_area_root.rb_node; 2332ca23e405STejun Heo struct vmap_area *va = NULL; 2333ca23e405STejun Heo 2334ca23e405STejun Heo while (n) { 2335ca23e405STejun Heo va = rb_entry(n, struct vmap_area, rb_node); 2336ca23e405STejun Heo if (end < va->va_end) 2337ca23e405STejun Heo n = n->rb_left; 2338ca23e405STejun Heo else if (end > va->va_end) 2339ca23e405STejun Heo n = n->rb_right; 2340ca23e405STejun Heo else 2341ca23e405STejun Heo break; 2342ca23e405STejun Heo } 2343ca23e405STejun Heo 2344ca23e405STejun Heo if (!va) 2345ca23e405STejun Heo return false; 2346ca23e405STejun Heo 2347ca23e405STejun Heo if (va->va_end > end) { 2348ca23e405STejun Heo *pnext = va; 2349ca23e405STejun Heo *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2350ca23e405STejun Heo } else { 2351ca23e405STejun Heo *pprev = va; 2352ca23e405STejun Heo *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); 2353ca23e405STejun Heo } 2354ca23e405STejun Heo return true; 2355ca23e405STejun Heo } 2356ca23e405STejun Heo 2357ca23e405STejun Heo /** 2358ca23e405STejun Heo * pvm_determine_end - find the highest aligned address between two vmap_areas 2359ca23e405STejun Heo * @pnext: in/out arg for the next vmap_area 2360ca23e405STejun Heo * @pprev: in/out arg for the previous vmap_area 2361ca23e405STejun Heo * @align: alignment 2362ca23e405STejun Heo * 2363ca23e405STejun Heo * Returns: determined end address 2364ca23e405STejun Heo * 2365ca23e405STejun Heo * Find the highest aligned address between *@pnext and *@pprev below 2366ca23e405STejun Heo * VMALLOC_END.
*@pnext and *@pprev are adjusted so that the aligned 2367ca23e405STejun Heo * down address is between the end addresses of the two vmap_areas. 2368ca23e405STejun Heo * 2369ca23e405STejun Heo * Please note that the address returned by this function may fall 2370ca23e405STejun Heo * inside *@pnext vmap_area. The caller is responsible for checking 2371ca23e405STejun Heo * that. 2372ca23e405STejun Heo */ 2373ca23e405STejun Heo static unsigned long pvm_determine_end(struct vmap_area **pnext, 2374ca23e405STejun Heo struct vmap_area **pprev, 2375ca23e405STejun Heo unsigned long align) 2376ca23e405STejun Heo { 2377ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 2378ca23e405STejun Heo unsigned long addr; 2379ca23e405STejun Heo 2380ca23e405STejun Heo if (*pnext) 2381ca23e405STejun Heo addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); 2382ca23e405STejun Heo else 2383ca23e405STejun Heo addr = vmalloc_end; 2384ca23e405STejun Heo 2385ca23e405STejun Heo while (*pprev && (*pprev)->va_end > addr) { 2386ca23e405STejun Heo *pnext = *pprev; 2387ca23e405STejun Heo *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2388ca23e405STejun Heo } 2389ca23e405STejun Heo 2390ca23e405STejun Heo return addr; 2391ca23e405STejun Heo } 2392ca23e405STejun Heo 2393ca23e405STejun Heo /** 2394ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 2395ca23e405STejun Heo * @offsets: array containing offset of each area 2396ca23e405STejun Heo * @sizes: array containing size of each area 2397ca23e405STejun Heo * @nr_vms: the number of areas to allocate 2398ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this 2399ca23e405STejun Heo * 2400ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated 2401ca23e405STejun Heo * vm_structs on success, %NULL on failure 2402ca23e405STejun Heo * 2403ca23e405STejun Heo * Percpu allocator wants to use congruent vm areas so that it can 2404ca23e405STejun Heo * maintain the offsets among percpu areas. This function allocates 2405ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 2406ec3f64fcSDavid Rientjes * be scattered pretty far, distance between two areas easily going up 2407ec3f64fcSDavid Rientjes * to gigabytes. To avoid interacting with regular vmallocs, these 2408ec3f64fcSDavid Rientjes * areas are allocated from the top. 2409ca23e405STejun Heo * 2410ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It 2411ca23e405STejun Heo * does everything top-down and scans areas from the end looking for a 2412ca23e405STejun Heo * matching slot. While scanning, if any of the areas overlaps with 2413ca23e405STejun Heo * existing vmap_area, the base address is pulled down to fit the 2414ca23e405STejun Heo * area. Scanning is repeated till all the areas fit and then all 2415ca23e405STejun Heo * necessary data structures are inserted and the result is returned.
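 *
 * A minimal call shape (an illustrative sketch, not taken from an in-tree
 * caller; the names and values below are hypothetical): two 32K areas
 * placed 1MB apart, both page aligned:
 *
 *	const unsigned long offsets[] = { 0, 1UL << 20 };
 *	const size_t sizes[] = { 32 << 10, 32 << 10 };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
 *
 * On success the caller populates pages behind each vms[i]->addr and
 * eventually releases everything with pcpu_free_vm_areas(vms, 2).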
2416ca23e405STejun Heo */ 2417ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 2418ca23e405STejun Heo const size_t *sizes, int nr_vms, 2419ec3f64fcSDavid Rientjes size_t align) 2420ca23e405STejun Heo { 2421ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 2422ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 2423ca23e405STejun Heo struct vmap_area **vas, *prev, *next; 2424ca23e405STejun Heo struct vm_struct **vms; 2425ca23e405STejun Heo int area, area2, last_area, term_area; 2426ca23e405STejun Heo unsigned long base, start, end, last_end; 2427ca23e405STejun Heo bool purged = false; 2428ca23e405STejun Heo 2429ca23e405STejun Heo /* verify parameters and allocate data structures */ 2430891c49abSAlexander Kuleshov BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 2431ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 2432ca23e405STejun Heo start = offsets[area]; 2433ca23e405STejun Heo end = start + sizes[area]; 2434ca23e405STejun Heo 2435ca23e405STejun Heo /* is everything aligned properly? */ 2436ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 2437ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 2438ca23e405STejun Heo 2439ca23e405STejun Heo /* detect the area with the highest address */ 2440ca23e405STejun Heo if (start > offsets[last_area]) 2441ca23e405STejun Heo last_area = area; 2442ca23e405STejun Heo 2443ca23e405STejun Heo for (area2 = 0; area2 < nr_vms; area2++) { 2444ca23e405STejun Heo unsigned long start2 = offsets[area2]; 2445ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 2446ca23e405STejun Heo 2447ca23e405STejun Heo if (area2 == area) 2448ca23e405STejun Heo continue; 2449ca23e405STejun Heo 2450ca23e405STejun Heo BUG_ON(start2 >= start && start2 < end); 2451ca23e405STejun Heo BUG_ON(end2 <= end && end2 > start); 2452ca23e405STejun Heo } 2453ca23e405STejun Heo } 2454ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 2455ca23e405STejun Heo 2456ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 2457ca23e405STejun Heo WARN_ON(true); 2458ca23e405STejun Heo return NULL; 2459ca23e405STejun Heo } 2460ca23e405STejun Heo 24614d67d860SThomas Meyer vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 24624d67d860SThomas Meyer vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 2463ca23e405STejun Heo if (!vas || !vms) 2464f1db7afdSKautuk Consul goto err_free2; 2465ca23e405STejun Heo 2466ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 2467ec3f64fcSDavid Rientjes vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); 2468ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 2469ca23e405STejun Heo if (!vas[area] || !vms[area]) 2470ca23e405STejun Heo goto err_free; 2471ca23e405STejun Heo } 2472ca23e405STejun Heo retry: 2473ca23e405STejun Heo spin_lock(&vmap_area_lock); 2474ca23e405STejun Heo 2475ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 2476ca23e405STejun Heo area = term_area = last_area; 2477ca23e405STejun Heo start = offsets[area]; 2478ca23e405STejun Heo end = start + sizes[area]; 2479ca23e405STejun Heo 2480ca23e405STejun Heo if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { 2481ca23e405STejun Heo base = vmalloc_end - last_end; 2482ca23e405STejun Heo goto found; 2483ca23e405STejun Heo } 2484ca23e405STejun Heo base = pvm_determine_end(&next, &prev, align) - end; 2485ca23e405STejun Heo 2486ca23e405STejun 
Heo while (true) { 2487ca23e405STejun Heo BUG_ON(next && next->va_end <= base + end); 2488ca23e405STejun Heo BUG_ON(prev && prev->va_end > base + end); 2489ca23e405STejun Heo 2490ca23e405STejun Heo /* 2491ca23e405STejun Heo * base might have underflowed, add last_end before 2492ca23e405STejun Heo * comparing. 2493ca23e405STejun Heo */ 2494ca23e405STejun Heo if (base + last_end < vmalloc_start + last_end) { 2495ca23e405STejun Heo spin_unlock(&vmap_area_lock); 2496ca23e405STejun Heo if (!purged) { 2497ca23e405STejun Heo purge_vmap_area_lazy(); 2498ca23e405STejun Heo purged = true; 2499ca23e405STejun Heo goto retry; 2500ca23e405STejun Heo } 2501ca23e405STejun Heo goto err_free; 2502ca23e405STejun Heo } 2503ca23e405STejun Heo 2504ca23e405STejun Heo /* 2505ca23e405STejun Heo * If next overlaps, move base downwards so that it's 2506ca23e405STejun Heo * right below next and then recheck. 2507ca23e405STejun Heo */ 2508ca23e405STejun Heo if (next && next->va_start < base + end) { 2509ca23e405STejun Heo base = pvm_determine_end(&next, &prev, align) - end; 2510ca23e405STejun Heo term_area = area; 2511ca23e405STejun Heo continue; 2512ca23e405STejun Heo } 2513ca23e405STejun Heo 2514ca23e405STejun Heo /* 2515ca23e405STejun Heo * If prev overlaps, shift down next and prev and move 2516ca23e405STejun Heo * base so that it's right below new next and then 2517ca23e405STejun Heo * recheck. 2518ca23e405STejun Heo */ 2519ca23e405STejun Heo if (prev && prev->va_end > base + start) { 2520ca23e405STejun Heo next = prev; 2521ca23e405STejun Heo prev = node_to_va(rb_prev(&next->rb_node)); 2522ca23e405STejun Heo base = pvm_determine_end(&next, &prev, align) - end; 2523ca23e405STejun Heo term_area = area; 2524ca23e405STejun Heo continue; 2525ca23e405STejun Heo } 2526ca23e405STejun Heo 2527ca23e405STejun Heo /* 2528ca23e405STejun Heo * This area fits, move on to the previous one. If 2529ca23e405STejun Heo * the previous one is the terminal one, we're done. 
2530ca23e405STejun Heo */ 2531ca23e405STejun Heo area = (area + nr_vms - 1) % nr_vms; 2532ca23e405STejun Heo if (area == term_area) 2533ca23e405STejun Heo break; 2534ca23e405STejun Heo start = offsets[area]; 2535ca23e405STejun Heo end = start + sizes[area]; 2536ca23e405STejun Heo pvm_find_next_prev(base + end, &next, &prev); 2537ca23e405STejun Heo } 2538ca23e405STejun Heo found: 2539ca23e405STejun Heo /* we've found a fitting base, insert all va's */ 2540ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 2541ca23e405STejun Heo struct vmap_area *va = vas[area]; 2542ca23e405STejun Heo 2543ca23e405STejun Heo va->va_start = base + offsets[area]; 2544ca23e405STejun Heo va->va_end = va->va_start + sizes[area]; 2545ca23e405STejun Heo __insert_vmap_area(va); 2546ca23e405STejun Heo } 2547ca23e405STejun Heo 2548ca23e405STejun Heo vmap_area_pcpu_hole = base + offsets[last_area]; 2549ca23e405STejun Heo 2550ca23e405STejun Heo spin_unlock(&vmap_area_lock); 2551ca23e405STejun Heo 2552ca23e405STejun Heo /* insert all vm's */ 2553ca23e405STejun Heo for (area = 0; area < nr_vms; area++) 25543645cb4aSZhang Yanfei setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, 2555ca23e405STejun Heo pcpu_get_vm_areas); 2556ca23e405STejun Heo 2557ca23e405STejun Heo kfree(vas); 2558ca23e405STejun Heo return vms; 2559ca23e405STejun Heo 2560ca23e405STejun Heo err_free: 2561ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 2562ca23e405STejun Heo kfree(vas[area]); 2563ca23e405STejun Heo kfree(vms[area]); 2564ca23e405STejun Heo } 2565f1db7afdSKautuk Consul err_free2: 2566ca23e405STejun Heo kfree(vas); 2567ca23e405STejun Heo kfree(vms); 2568ca23e405STejun Heo return NULL; 2569ca23e405STejun Heo } 2570ca23e405STejun Heo 2571ca23e405STejun Heo /** 2572ca23e405STejun Heo * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 2573ca23e405STejun Heo * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 2574ca23e405STejun Heo * @nr_vms: the number of allocated areas 2575ca23e405STejun Heo * 2576ca23e405STejun Heo * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
2577ca23e405STejun Heo */ 2578ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 2579ca23e405STejun Heo { 2580ca23e405STejun Heo int i; 2581ca23e405STejun Heo 2582ca23e405STejun Heo for (i = 0; i < nr_vms; i++) 2583ca23e405STejun Heo free_vm_area(vms[i]); 2584ca23e405STejun Heo kfree(vms); 2585ca23e405STejun Heo } 25864f8b02b4STejun Heo #endif /* CONFIG_SMP */ 2587a10aa579SChristoph Lameter 2588a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS 2589a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos) 2590d4033afdSJoonsoo Kim __acquires(&vmap_area_lock) 2591a10aa579SChristoph Lameter { 2592d4033afdSJoonsoo Kim spin_lock(&vmap_area_lock); 25933f500069Szijun_hu return seq_list_start(&vmap_area_list, *pos); 2594a10aa579SChristoph Lameter } 2595a10aa579SChristoph Lameter 2596a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos) 2597a10aa579SChristoph Lameter { 25983f500069Szijun_hu return seq_list_next(p, &vmap_area_list, pos); 2599a10aa579SChristoph Lameter } 2600a10aa579SChristoph Lameter 2601a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p) 2602d4033afdSJoonsoo Kim __releases(&vmap_area_lock) 2603a10aa579SChristoph Lameter { 2604d4033afdSJoonsoo Kim spin_unlock(&vmap_area_lock); 2605a10aa579SChristoph Lameter } 2606a10aa579SChristoph Lameter 2607a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v) 2608a47a126aSEric Dumazet { 2609e5adfffcSKirill A. Shutemov if (IS_ENABLED(CONFIG_NUMA)) { 2610a47a126aSEric Dumazet unsigned int nr, *counters = m->private; 2611a47a126aSEric Dumazet 2612a47a126aSEric Dumazet if (!counters) 2613a47a126aSEric Dumazet return; 2614a47a126aSEric Dumazet 2615af12346cSWanpeng Li if (v->flags & VM_UNINITIALIZED) 2616af12346cSWanpeng Li return; 26177e5b528bSDmitry Vyukov /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 26187e5b528bSDmitry Vyukov smp_rmb(); 2619af12346cSWanpeng Li 2620a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 2621a47a126aSEric Dumazet 2622a47a126aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr++) 2623a47a126aSEric Dumazet counters[page_to_nid(v->pages[nr])]++; 2624a47a126aSEric Dumazet 2625a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY) 2626a47a126aSEric Dumazet if (counters[nr]) 2627a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]); 2628a47a126aSEric Dumazet } 2629a47a126aSEric Dumazet } 2630a47a126aSEric Dumazet 2631a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p) 2632a10aa579SChristoph Lameter { 26333f500069Szijun_hu struct vmap_area *va; 2634d4033afdSJoonsoo Kim struct vm_struct *v; 2635d4033afdSJoonsoo Kim 26363f500069Szijun_hu va = list_entry(p, struct vmap_area, list); 26373f500069Szijun_hu 2638c2ce8c14SWanpeng Li /* 2639c2ce8c14SWanpeng Li * s_show can race with remove_vm_area(); !VM_VM_AREA means the vmap 2640c2ce8c14SWanpeng Li * area is being torn down or belongs to a vm_map_ram allocation.
2641c2ce8c14SWanpeng Li */ 2642c2ce8c14SWanpeng Li if (!(va->flags & VM_VM_AREA)) 2643d4033afdSJoonsoo Kim return 0; 2644d4033afdSJoonsoo Kim 2645d4033afdSJoonsoo Kim v = va->vm; 2646a10aa579SChristoph Lameter 264745ec1690SKees Cook seq_printf(m, "0x%pK-0x%pK %7ld", 2648a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 2649a10aa579SChristoph Lameter 265062c70bceSJoe Perches if (v->caller) 265162c70bceSJoe Perches seq_printf(m, " %pS", v->caller); 265223016969SChristoph Lameter 2653a10aa579SChristoph Lameter if (v->nr_pages) 2654a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 2655a10aa579SChristoph Lameter 2656a10aa579SChristoph Lameter if (v->phys_addr) 2657ffa71f33SKenji Kaneshige seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr); 2658a10aa579SChristoph Lameter 2659a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 2660f4527c90SFabian Frederick seq_puts(m, " ioremap"); 2661a10aa579SChristoph Lameter 2662a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 2663f4527c90SFabian Frederick seq_puts(m, " vmalloc"); 2664a10aa579SChristoph Lameter 2665a10aa579SChristoph Lameter if (v->flags & VM_MAP) 2666f4527c90SFabian Frederick seq_puts(m, " vmap"); 2667a10aa579SChristoph Lameter 2668a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 2669f4527c90SFabian Frederick seq_puts(m, " user"); 2670a10aa579SChristoph Lameter 2671244d63eeSDavid Rientjes if (is_vmalloc_addr(v->pages)) 2672f4527c90SFabian Frederick seq_puts(m, " vpages"); 2673a10aa579SChristoph Lameter 2674a47a126aSEric Dumazet show_numa_info(m, v); 2675a10aa579SChristoph Lameter seq_putc(m, '\n'); 2676a10aa579SChristoph Lameter return 0; 2677a10aa579SChristoph Lameter } 2678a10aa579SChristoph Lameter 26795f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = { 2680a10aa579SChristoph Lameter .start = s_start, 2681a10aa579SChristoph Lameter .next = s_next, 2682a10aa579SChristoph Lameter .stop = s_stop, 2683a10aa579SChristoph Lameter .show = s_show, 2684a10aa579SChristoph Lameter }; 26855f6a6a9cSAlexey Dobriyan 26865f6a6a9cSAlexey Dobriyan static int vmalloc_open(struct inode *inode, struct file *file) 26875f6a6a9cSAlexey Dobriyan { 2688703394c1SRob Jones if (IS_ENABLED(CONFIG_NUMA)) 2689703394c1SRob Jones return seq_open_private(file, &vmalloc_op, 2690703394c1SRob Jones nr_node_ids * sizeof(unsigned int)); 2691703394c1SRob Jones else 2692703394c1SRob Jones return seq_open(file, &vmalloc_op); 26935f6a6a9cSAlexey Dobriyan } 26945f6a6a9cSAlexey Dobriyan 26955f6a6a9cSAlexey Dobriyan static const struct file_operations proc_vmalloc_operations = { 26965f6a6a9cSAlexey Dobriyan .open = vmalloc_open, 26975f6a6a9cSAlexey Dobriyan .read = seq_read, 26985f6a6a9cSAlexey Dobriyan .llseek = seq_lseek, 26995f6a6a9cSAlexey Dobriyan .release = seq_release_private, 27005f6a6a9cSAlexey Dobriyan }; 27015f6a6a9cSAlexey Dobriyan 27025f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 27035f6a6a9cSAlexey Dobriyan { 27045f6a6a9cSAlexey Dobriyan proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); 27055f6a6a9cSAlexey Dobriyan return 0; 27065f6a6a9cSAlexey Dobriyan } 27075f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 2708db3808c1SJoonsoo Kim 2709a10aa579SChristoph Lameter #endif 2710a10aa579SChristoph Lameter 2711
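
/*
 * A minimal usage sketch for remap_vmalloc_range(), assuming a driver that
 * exports a buffer allocated with vmalloc_user() (which zeroes the pages
 * and sets VM_USERMAP) through its ->mmap handler.  The names example_buf,
 * example_mmap and EXAMPLE_SIZE are hypothetical and not defined here.
 */
#if 0
static void *example_buf;	/* assumed: example_buf = vmalloc_user(EXAMPLE_SIZE); */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Maps the whole vma; returns -EINVAL if it exceeds the buffer. */
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}
#endif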
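
/*
 * A minimal usage sketch for alloc_vm_area()/free_vm_area(), assuming a
 * caller (e.g. a paravirtualized guest) that needs a kernel virtual range
 * with page tables present but nothing mapped, so the mapping can be
 * installed by another agent.  example_reserve_page() is a hypothetical
 * helper, not an in-tree function.
 */
#if 0
static int example_reserve_page(void)
{
	pte_t *pte;
	struct vm_struct *area;

	area = alloc_vm_area(PAGE_SIZE, &pte);	/* pte now points at the PTE slot */
	if (!area)
		return -ENOMEM;

	/* ... hand area->addr and pte to the mapping interface ... */

	free_vm_area(area);	/* tears the range back down */
	return 0;
}
#endif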