/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/llist.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;
		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}
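/*
 * Illustrative sketch (not part of this file): how a deferred free is
 * expected to feed the machinery above. The vfree() side is outside this
 * excerpt, so the exact caller code here is an assumption:
 *
 *	struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
 *
 *	if (llist_add((struct llist_node *)addr, &p->list))
 *		schedule_work(&p->wq);
 *
 * The freed block itself is reused as the llist node, which is why
 * free_work() can treat each node pointer as the address to __vunmap().
 */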

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * I.e., the pte at addr + N*PAGE_SIZE shall point to the pfn corresponding
 * to pages[N].
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}
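/*
 * Illustrative sketch (not part of this file): the contract of the two
 * helpers above. vmap_page_range() returns the number of pages wired in
 * on success, so a caller mapping npages pages might do:
 *
 *	err = vmap_page_range(addr, addr + (npages << PAGE_SHIFT),
 *			      PAGE_KERNEL, pages);
 *	if (err < 0)
 *		goto fail;
 *	BUG_ON(err != npages);
 *
 * "npages", "pages" and the error handling are assumptions for the
 * example; map_vm_area() near the end of this file is a real caller.
 */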

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
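/*
 * Illustrative sketch (not part of this file): resolving the backing page
 * of a vmalloc'ed buffer, e.g. to hand it to code that wants a struct
 * page. "buf" is an assumed vmalloc() allocation:
 *
 *	void *buf = vmalloc(4 * PAGE_SIZE);
 *	struct page *pg = vmalloc_to_page((char *)buf + 2 * PAGE_SIZE);
 *	unsigned long pfn = vmalloc_to_pfn(buf);
 *
 * Note that vmalloc_to_pfn() assumes the mapping exists: it passes the
 * result of vmalloc_to_page() to page_to_pfn() without a NULL check.
 */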

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	struct vm_struct *vm;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);
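/*
 * Illustrative note (not part of this file): after __insert_vmap_area(),
 * an in-order walk of vmap_area_root and a walk of vmap_area_list visit
 * the same areas in the same ascending-address order. Lookups take the
 * tree under vmap_area_lock; lockless readers take the RCU list, as the
 * purge code below does:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(va, &vmap_area_list, list)
 *		inspect(va);
 *	rcu_read_unlock();
 */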

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(!is_power_of_2(align));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size - 1 < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size - 1 < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size - 1 < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_entry(first->list.next,
				struct vmap_area, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
	if (printk_ratelimit())
		printk(KERN_WARNING
			"vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}
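/*
 * Illustrative sketch (not part of this file): a typical call into the
 * allocator above, in the style of the vmap block and vm_map_ram() paths
 * below. The parameters here are assumed for the example:
 *
 *	struct vmap_area *va;
 *
 *	va = alloc_vmap_area(16 * PAGE_SIZE, PAGE_SIZE,
 *			     VMALLOC_START, VMALLOC_END,
 *			     NUMA_NO_NODE, GFP_KERNEL);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *
 * On success, the range va->va_start .. va->va_end is reserved KVA;
 * nothing is mapped into it yet.
 */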

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation. Areas outside of the vmalloc area can be returned
	 * here too, so consider only end addresses which fall inside the
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow*; by contrast, debugging of linear
	 * kernel virtual addresses doesn't need a broadcast TLB flush,
	 * so it is a lot faster.
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
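/*
 * Worked example (illustrative, assuming 4K pages): with 8 online CPUs,
 * fls(8) == 4, so lazy_max_pages() returns 4 * (32MB / 4KB) = 32768
 * pages. That is, up to 128MB of stale kernel virtual address space may
 * accumulate before a purge and its global TLB flush are triggered.
 */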

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * Called before a call to iounmap() if the caller wants the
 * vm_area_struct to be freed immediately.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
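/*
 * Worked example (illustrative, assuming 4K pages, 64-bit, NR_CPUS=64):
 * VMALLOC_PAGES = 128GB / 4KB = 32M pages, so
 * VMALLOC_PAGES / roundup_pow_of_two(64) / 16 = 32768, which is clamped
 * down to VMAP_BBMAP_BITS_MAX. That gives VMAP_BBMAP_BITS = 1024 and
 * VMAP_BLOCK_SIZE = 1024 * 4KB = 4MB per vmap block.
 */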

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}
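/*
 * Illustrative note (not part of this file): because blocks are allocated
 * VMAP_BLOCK_SIZE-aligned (see new_vmap_block() above), every address
 * inside a block maps to the same index, which is what makes the radix
 * tree lookup in vb_free() below work:
 *
 *	addr_to_vb_idx(vb->va->va_start) ==
 *		addr_to_vb_idx(vb->va->va_start + VMAP_BLOCK_SIZE - 1)
 *
 * vb_alloc() BUG_ONs on exactly this invariant.
 */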

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_thiscpu(void)
{
	purge_fragmented_blocks(smp_processor_id());
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;
	int purge = 0;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what the caller wants, since
		 * get_order(0) returns a funny result. Just warn and
		 * terminate early.
		 */
		return NULL;
	}
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;

		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i < 0) {
			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
				/* fragmented and no outstanding allocations */
				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
				purge = 1;
			}
			goto next;
		}
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	if (purge)
		purge_fragmented_blocks_thiscpu();

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer, and so there might be some CPUs with TLB entries
 * still referencing that page (in addition to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
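/*
 * Illustrative sketch (not part of this file): a typical transient mapping
 * with the pair above. The "pages" array and "nr_pages" are assumed inputs,
 * e.g. filled from alloc_page(GFP_KERNEL):
 *
 *	void *va = vm_map_ram(pages, nr_pages, -1, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, nr_pages << PAGE_SHIFT);
 *	vm_unmap_ram(va, nr_pages);
 *
 * The count passed to vm_unmap_ram() must match the vm_map_ram() call;
 * partial unmaps are not supported.
 */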

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called. @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero. On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}
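/*
 * Illustrative sketch (not part of this file): early-boot registration in
 * the style of the percpu first-chunk setup code. The field values here
 * are assumed for the example:
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.flags = VM_ALLOC;
 *	early_vm.size = size;
 *	vm_area_register_early(&early_vm, PAGE_SIZE);
 *
 * On return, early_vm.addr holds the chosen address, and vmalloc_init()
 * will later import the entry from vmlist into the vmap_area tree.
 */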
*/ 1218822c18f2SIvan Kokshaysky for (tmp = vmlist; tmp; tmp = tmp->next) { 121943ebdac4SPekka Enberg va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT); 1220dbda591dSKyongHo va->flags = VM_VM_AREA; 1221822c18f2SIvan Kokshaysky va->va_start = (unsigned long)tmp->addr; 1222822c18f2SIvan Kokshaysky va->va_end = va->va_start + tmp->size; 1223dbda591dSKyongHo va->vm = tmp; 1224822c18f2SIvan Kokshaysky __insert_vmap_area(va); 1225822c18f2SIvan Kokshaysky } 1226ca23e405STejun Heo 1227ca23e405STejun Heo vmap_area_pcpu_hole = VMALLOC_END; 1228ca23e405STejun Heo 12299b463334SJeremy Fitzhardinge vmap_initialized = true; 1230db64fe02SNick Piggin } 1231db64fe02SNick Piggin 12328fc48985STejun Heo /** 12338fc48985STejun Heo * map_kernel_range_noflush - map kernel VM area with the specified pages 12348fc48985STejun Heo * @addr: start of the VM area to map 12358fc48985STejun Heo * @size: size of the VM area to map 12368fc48985STejun Heo * @prot: page protection flags to use 12378fc48985STejun Heo * @pages: pages to map 12388fc48985STejun Heo * 12398fc48985STejun Heo * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size 12408fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 12418fc48985STejun Heo * friends. 12428fc48985STejun Heo * 12438fc48985STejun Heo * NOTE: 12448fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 12458fc48985STejun Heo * responsible for calling flush_cache_vmap() on to-be-mapped areas 12468fc48985STejun Heo * before calling this function. 12478fc48985STejun Heo * 12488fc48985STejun Heo * RETURNS: 12498fc48985STejun Heo * The number of pages mapped on success, -errno on failure. 12508fc48985STejun Heo */ 12518fc48985STejun Heo int map_kernel_range_noflush(unsigned long addr, unsigned long size, 12528fc48985STejun Heo pgprot_t prot, struct page **pages) 12538fc48985STejun Heo { 12548fc48985STejun Heo return vmap_page_range_noflush(addr, addr + size, prot, pages); 12558fc48985STejun Heo } 12568fc48985STejun Heo 12578fc48985STejun Heo /** 12588fc48985STejun Heo * unmap_kernel_range_noflush - unmap kernel VM area 12598fc48985STejun Heo * @addr: start of the VM area to unmap 12608fc48985STejun Heo * @size: size of the VM area to unmap 12618fc48985STejun Heo * 12628fc48985STejun Heo * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size 12638fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 12648fc48985STejun Heo * friends. 12658fc48985STejun Heo * 12668fc48985STejun Heo * NOTE: 12678fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 12688fc48985STejun Heo * responsible for calling flush_cache_vunmap() on to-be-unmapped areas 12698fc48985STejun Heo * before calling this function and flush_tlb_kernel_range() after. 12708fc48985STejun Heo */ 12718fc48985STejun Heo void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) 12728fc48985STejun Heo { 12738fc48985STejun Heo vunmap_page_range(addr, addr + size); 12748fc48985STejun Heo } 127581e88fdcSHuang Ying EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush); 12768fc48985STejun Heo 12778fc48985STejun Heo /** 12788fc48985STejun Heo * unmap_kernel_range - unmap kernel VM area and flush cache and TLB 12798fc48985STejun Heo * @addr: start of the VM area to unmap 12808fc48985STejun Heo * @size: size of the VM area to unmap 12818fc48985STejun Heo * 12828fc48985STejun Heo * Similar to unmap_kernel_range_noflush() but flushes the cache before 12838fc48985STejun Heo * the unmapping and the TLB after.
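 *
 * Example (an illustrative sketch, not an in-tree caller; it assumes
 * addr and size were reserved with get_vm_area() and mapped earlier
 * with map_kernel_range_noflush() plus flush_cache_vmap()):
 *
 *	unmap_kernel_range(addr, size);
 *
 * Unlike unmap_kernel_range_noflush(), no separate flush_cache_vunmap()
 * or flush_tlb_kernel_range() calls are needed around it.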
12848fc48985STejun Heo */ 1285db64fe02SNick Piggin void unmap_kernel_range(unsigned long addr, unsigned long size) 1286db64fe02SNick Piggin { 1287db64fe02SNick Piggin unsigned long end = addr + size; 1288f6fcba70STejun Heo 1289f6fcba70STejun Heo flush_cache_vunmap(addr, end); 1290db64fe02SNick Piggin vunmap_page_range(addr, end); 1291db64fe02SNick Piggin flush_tlb_kernel_range(addr, end); 1292db64fe02SNick Piggin } 1293db64fe02SNick Piggin 1294db64fe02SNick Piggin int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages) 1295db64fe02SNick Piggin { 1296db64fe02SNick Piggin unsigned long addr = (unsigned long)area->addr; 1297db64fe02SNick Piggin unsigned long end = addr + area->size - PAGE_SIZE; 1298db64fe02SNick Piggin int err; 1299db64fe02SNick Piggin 1300db64fe02SNick Piggin err = vmap_page_range(addr, end, prot, *pages); 1301db64fe02SNick Piggin if (err > 0) { 1302db64fe02SNick Piggin *pages += err; 1303db64fe02SNick Piggin err = 0; 1304db64fe02SNick Piggin } 1305db64fe02SNick Piggin 1306db64fe02SNick Piggin return err; 1307db64fe02SNick Piggin } 1308db64fe02SNick Piggin EXPORT_SYMBOL_GPL(map_vm_area); 1309db64fe02SNick Piggin 1310db64fe02SNick Piggin /*** Old vmalloc interfaces ***/ 1311db64fe02SNick Piggin DEFINE_RWLOCK(vmlist_lock); 1312db64fe02SNick Piggin struct vm_struct *vmlist; 1313db64fe02SNick Piggin 1314f5252e00SMitsuo Hayasaka static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 13155e6cafc8SMarek Szyprowski unsigned long flags, const void *caller) 1316cf88c790STejun Heo { 1317cf88c790STejun Heo vm->flags = flags; 1318cf88c790STejun Heo vm->addr = (void *)va->va_start; 1319cf88c790STejun Heo vm->size = va->va_end - va->va_start; 1320cf88c790STejun Heo vm->caller = caller; 1321db1aecafSMinchan Kim va->vm = vm; 1322cf88c790STejun Heo va->flags |= VM_VM_AREA; 1323f5252e00SMitsuo Hayasaka } 1324cf88c790STejun Heo 1325f5252e00SMitsuo Hayasaka static void insert_vmalloc_vmlist(struct vm_struct *vm) 1326f5252e00SMitsuo Hayasaka { 1327f5252e00SMitsuo Hayasaka struct vm_struct *tmp, **p; 1328f5252e00SMitsuo Hayasaka 1329f5252e00SMitsuo Hayasaka vm->flags &= ~VM_UNLIST; 1330cf88c790STejun Heo write_lock(&vmlist_lock); 1331cf88c790STejun Heo for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 1332cf88c790STejun Heo if (tmp->addr >= vm->addr) 1333cf88c790STejun Heo break; 1334cf88c790STejun Heo } 1335cf88c790STejun Heo vm->next = *p; 1336cf88c790STejun Heo *p = vm; 1337cf88c790STejun Heo write_unlock(&vmlist_lock); 1338cf88c790STejun Heo } 1339cf88c790STejun Heo 1340f5252e00SMitsuo Hayasaka static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 13415e6cafc8SMarek Szyprowski unsigned long flags, const void *caller) 1342f5252e00SMitsuo Hayasaka { 1343f5252e00SMitsuo Hayasaka setup_vmalloc_vm(vm, va, flags, caller); 1344f5252e00SMitsuo Hayasaka insert_vmalloc_vmlist(vm); 1345f5252e00SMitsuo Hayasaka } 1346f5252e00SMitsuo Hayasaka 1347db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size, 13482dca6999SDavid Miller unsigned long align, unsigned long flags, unsigned long start, 13495e6cafc8SMarek Szyprowski unsigned long end, int node, gfp_t gfp_mask, const void *caller) 1350db64fe02SNick Piggin { 13510006526dSKautuk Consul struct vmap_area *va; 1352db64fe02SNick Piggin struct vm_struct *area; 13531da177e4SLinus Torvalds 135452fd24caSGiridhar Pemmasani BUG_ON(in_interrupt()); 13551da177e4SLinus Torvalds if (flags & VM_IOREMAP) { 13561da177e4SLinus Torvalds int bit = fls(size); 13571da177e4SLinus 
Torvalds 13581da177e4SLinus Torvalds if (bit > IOREMAP_MAX_ORDER) 13591da177e4SLinus Torvalds bit = IOREMAP_MAX_ORDER; 13601da177e4SLinus Torvalds else if (bit < PAGE_SHIFT) 13611da177e4SLinus Torvalds bit = PAGE_SHIFT; 13621da177e4SLinus Torvalds 13631da177e4SLinus Torvalds align = 1ul << bit; 13641da177e4SLinus Torvalds } 1365db64fe02SNick Piggin 13661da177e4SLinus Torvalds size = PAGE_ALIGN(size); 136731be8309SOGAWA Hirofumi if (unlikely(!size)) 136831be8309SOGAWA Hirofumi return NULL; 13691da177e4SLinus Torvalds 1370cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 13711da177e4SLinus Torvalds if (unlikely(!area)) 13721da177e4SLinus Torvalds return NULL; 13731da177e4SLinus Torvalds 13741da177e4SLinus Torvalds /* 13751da177e4SLinus Torvalds * We always allocate a guard page. 13761da177e4SLinus Torvalds */ 13771da177e4SLinus Torvalds size += PAGE_SIZE; 13781da177e4SLinus Torvalds 1379db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask); 1380db64fe02SNick Piggin if (IS_ERR(va)) { 1381db64fe02SNick Piggin kfree(area); 1382db64fe02SNick Piggin return NULL; 13831da177e4SLinus Torvalds } 13841da177e4SLinus Torvalds 1385f5252e00SMitsuo Hayasaka /* 1386f5252e00SMitsuo Hayasaka * When this function is called from __vmalloc_node_range, 1387f5252e00SMitsuo Hayasaka * we do not add vm_struct to vmlist here to avoid 1388f5252e00SMitsuo Hayasaka * accessing uninitialized members of vm_struct such as 1389f5252e00SMitsuo Hayasaka * pages and nr_pages fields. They will be set later. 1390f5252e00SMitsuo Hayasaka * To distinguish it from others, we use a VM_UNLIST flag. 1391f5252e00SMitsuo Hayasaka */ 1392f5252e00SMitsuo Hayasaka if (flags & VM_UNLIST) 1393f5252e00SMitsuo Hayasaka setup_vmalloc_vm(area, va, flags, caller); 1394f5252e00SMitsuo Hayasaka else 1395cf88c790STejun Heo insert_vmalloc_vm(area, va, flags, caller); 1396f5252e00SMitsuo Hayasaka 13971da177e4SLinus Torvalds return area; 13981da177e4SLinus Torvalds } 13991da177e4SLinus Torvalds 1400930fc45aSChristoph Lameter struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 1401930fc45aSChristoph Lameter unsigned long start, unsigned long end) 1402930fc45aSChristoph Lameter { 140300ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, 140400ef2d2fSDavid Rientjes GFP_KERNEL, __builtin_return_address(0)); 1405930fc45aSChristoph Lameter } 14065992b6daSRusty Russell EXPORT_SYMBOL_GPL(__get_vm_area); 1407930fc45aSChristoph Lameter 1408c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 1409c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end, 14105e6cafc8SMarek Szyprowski const void *caller) 1411c2968612SBenjamin Herrenschmidt { 141200ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, 141300ef2d2fSDavid Rientjes GFP_KERNEL, caller); 1414c2968612SBenjamin Herrenschmidt } 1415c2968612SBenjamin Herrenschmidt 14161da177e4SLinus Torvalds /** 1417183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area 14181da177e4SLinus Torvalds * @size: size of the area 14191da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or %VM_ALLOC 14201da177e4SLinus Torvalds * 14211da177e4SLinus Torvalds * Search an area of @size in the kernel virtual mapping area, 14221da177e4SLinus Torvalds * and reserve it for our purposes. Returns the area descriptor 14231da177e4SLinus Torvalds * on success or %NULL on failure.
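 *
 * Example (an illustrative sketch only): an ioremap-style user could
 * reserve a window, map its backing pfns, and eventually release the
 * range again with free_vm_area():
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	...
 *	free_vm_area(area);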
14241da177e4SLinus Torvalds */ 14251da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 14261da177e4SLinus Torvalds { 14272dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, 142800ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, 142900ef2d2fSDavid Rientjes __builtin_return_address(0)); 143023016969SChristoph Lameter } 143123016969SChristoph Lameter 143223016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 14335e6cafc8SMarek Szyprowski const void *caller) 143423016969SChristoph Lameter { 14352dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, 143600ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, caller); 14371da177e4SLinus Torvalds } 14381da177e4SLinus Torvalds 1439e9da6e99SMarek Szyprowski /** 1440e9da6e99SMarek Szyprowski * find_vm_area - find a contiguous kernel virtual area 1441e9da6e99SMarek Szyprowski * @addr: base address 1442e9da6e99SMarek Szyprowski * 1443e9da6e99SMarek Szyprowski * Search for the kernel VM area starting at @addr, and return it. 1444e9da6e99SMarek Szyprowski * It is up to the caller to do all required locking to keep the returned 1445e9da6e99SMarek Szyprowski * pointer valid. 1446e9da6e99SMarek Szyprowski */ 1447e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr) 144883342314SNick Piggin { 1449db64fe02SNick Piggin struct vmap_area *va; 145083342314SNick Piggin 1451db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 1452db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) 1453db1aecafSMinchan Kim return va->vm; 145483342314SNick Piggin 14557856dfebSAndi Kleen return NULL; 14567856dfebSAndi Kleen } 14577856dfebSAndi Kleen 14581da177e4SLinus Torvalds /** 1459183ff22bSSimon Arlott * remove_vm_area - find and remove a contiguous kernel virtual area 14601da177e4SLinus Torvalds * @addr: base address 14611da177e4SLinus Torvalds * 14621da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it. 14631da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe 14647856dfebSAndi Kleen * on SMP machines, except for its size or flags. 14651da177e4SLinus Torvalds */ 1466b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 14671da177e4SLinus Torvalds { 1468db64fe02SNick Piggin struct vmap_area *va; 1469db64fe02SNick Piggin 1470db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 1471db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) { 1472db1aecafSMinchan Kim struct vm_struct *vm = va->vm; 1473f5252e00SMitsuo Hayasaka 1474f5252e00SMitsuo Hayasaka if (!(vm->flags & VM_UNLIST)) { 1475db64fe02SNick Piggin struct vm_struct *tmp, **p; 1476dd32c279SKAMEZAWA Hiroyuki /* 1477f5252e00SMitsuo Hayasaka * remove from list and disallow access to 1478f5252e00SMitsuo Hayasaka * this vm_struct before unmap. (address range 1479f5252e00SMitsuo Hayasaka * conflicts are handled by the vmap layer.)
1480dd32c279SKAMEZAWA Hiroyuki */ 14811da177e4SLinus Torvalds write_lock(&vmlist_lock); 1482db64fe02SNick Piggin for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next) 1483db64fe02SNick Piggin ; 1484db64fe02SNick Piggin *p = tmp->next; 14851da177e4SLinus Torvalds write_unlock(&vmlist_lock); 1486f5252e00SMitsuo Hayasaka } 1487db64fe02SNick Piggin 1488dd32c279SKAMEZAWA Hiroyuki vmap_debug_free_range(va->va_start, va->va_end); 1489dd32c279SKAMEZAWA Hiroyuki free_unmap_vmap_area(va); 1490dd32c279SKAMEZAWA Hiroyuki vm->size -= PAGE_SIZE; 1491dd32c279SKAMEZAWA Hiroyuki 1492db64fe02SNick Piggin return vm; 1493db64fe02SNick Piggin } 1494db64fe02SNick Piggin return NULL; 14951da177e4SLinus Torvalds } 14961da177e4SLinus Torvalds 1497b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 14981da177e4SLinus Torvalds { 14991da177e4SLinus Torvalds struct vm_struct *area; 15001da177e4SLinus Torvalds 15011da177e4SLinus Torvalds if (!addr) 15021da177e4SLinus Torvalds return; 15031da177e4SLinus Torvalds 15041da177e4SLinus Torvalds if ((PAGE_SIZE-1) & (unsigned long)addr) { 15054c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr); 15061da177e4SLinus Torvalds return; 15071da177e4SLinus Torvalds } 15081da177e4SLinus Torvalds 15091da177e4SLinus Torvalds area = remove_vm_area(addr); 15101da177e4SLinus Torvalds if (unlikely(!area)) { 15114c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 15121da177e4SLinus Torvalds addr); 15131da177e4SLinus Torvalds return; 15141da177e4SLinus Torvalds } 15151da177e4SLinus Torvalds 15169a11b49aSIngo Molnar debug_check_no_locks_freed(addr, area->size); 15173ac7fe5aSThomas Gleixner debug_check_no_obj_freed(addr, area->size); 15189a11b49aSIngo Molnar 15191da177e4SLinus Torvalds if (deallocate_pages) { 15201da177e4SLinus Torvalds int i; 15211da177e4SLinus Torvalds 15221da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1523bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 1524bf53d6f8SChristoph Lameter 1525bf53d6f8SChristoph Lameter BUG_ON(!page); 1526bf53d6f8SChristoph Lameter __free_page(page); 15271da177e4SLinus Torvalds } 15281da177e4SLinus Torvalds 15298757d5faSJan Kiszka if (area->flags & VM_VPAGES) 15301da177e4SLinus Torvalds vfree(area->pages); 15311da177e4SLinus Torvalds else 15321da177e4SLinus Torvalds kfree(area->pages); 15331da177e4SLinus Torvalds } 15341da177e4SLinus Torvalds 15351da177e4SLinus Torvalds kfree(area); 15361da177e4SLinus Torvalds return; 15371da177e4SLinus Torvalds } 15381da177e4SLinus Torvalds 15391da177e4SLinus Torvalds /** 15401da177e4SLinus Torvalds * vfree - release memory allocated by vmalloc() 15411da177e4SLinus Torvalds * @addr: memory base address 15421da177e4SLinus Torvalds * 1543183ff22bSSimon Arlott * Free the virtually contiguous memory area starting at @addr, as 154480e93effSPekka Enberg * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is 154580e93effSPekka Enberg * NULL, no operation is performed.
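 *
 * Example (an illustrative sketch only):
 *
 *	void *buf = vmalloc(len);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);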
15461da177e4SLinus Torvalds * 1547*32fcfd40SAl Viro * Must not be called in NMI context (strictly speaking, only if we don't 1548*32fcfd40SAl Viro * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 1549*32fcfd40SAl Viro * conventions for vfree() arch-dependent would be a really bad idea) 1550*32fcfd40SAl Viro * 15511da177e4SLinus Torvalds */ 1552b3bdda02SChristoph Lameter void vfree(const void *addr) 15531da177e4SLinus Torvalds { 1554*32fcfd40SAl Viro BUG_ON(in_nmi()); 155589219d37SCatalin Marinas 155689219d37SCatalin Marinas kmemleak_free(addr); 155789219d37SCatalin Marinas 1558*32fcfd40SAl Viro if (!addr) 1559*32fcfd40SAl Viro return; 1560*32fcfd40SAl Viro if (unlikely(in_interrupt())) { 1561*32fcfd40SAl Viro struct vfree_deferred *p = &__get_cpu_var(vfree_deferred); 1562*32fcfd40SAl Viro llist_add((struct llist_node *)addr, &p->list); 1563*32fcfd40SAl Viro schedule_work(&p->wq); 1564*32fcfd40SAl Viro } else 15651da177e4SLinus Torvalds __vunmap(addr, 1); 15661da177e4SLinus Torvalds } 15671da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 15681da177e4SLinus Torvalds 15691da177e4SLinus Torvalds /** 15701da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 15711da177e4SLinus Torvalds * @addr: memory base address 15721da177e4SLinus Torvalds * 15731da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 15741da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 15751da177e4SLinus Torvalds * 157680e93effSPekka Enberg * Must not be called in interrupt context. 15771da177e4SLinus Torvalds */ 1578b3bdda02SChristoph Lameter void vunmap(const void *addr) 15791da177e4SLinus Torvalds { 15801da177e4SLinus Torvalds BUG_ON(in_interrupt()); 158134754b69SPeter Zijlstra might_sleep(); 1582*32fcfd40SAl Viro if (addr) 15831da177e4SLinus Torvalds __vunmap(addr, 0); 15841da177e4SLinus Torvalds } 15851da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 15861da177e4SLinus Torvalds 15871da177e4SLinus Torvalds /** 15881da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 15891da177e4SLinus Torvalds * @pages: array of page pointers 15901da177e4SLinus Torvalds * @count: number of pages to map 15911da177e4SLinus Torvalds * @flags: vm_area->flags 15921da177e4SLinus Torvalds * @prot: page protection for the mapping 15931da177e4SLinus Torvalds * 15941da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 15951da177e4SLinus Torvalds * space.
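 *
 * Example (an illustrative sketch; assumes the caller already owns
 * nr_pages pages, e.g. from alloc_page()):
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 *
 * vunmap() only drops the mapping; the pages still belong to the
 * caller and must be freed separately.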
15961da177e4SLinus Torvalds */ 15971da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 15981da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 15991da177e4SLinus Torvalds { 16001da177e4SLinus Torvalds struct vm_struct *area; 16011da177e4SLinus Torvalds 160234754b69SPeter Zijlstra might_sleep(); 160334754b69SPeter Zijlstra 16044481374cSJan Beulich if (count > totalram_pages) 16051da177e4SLinus Torvalds return NULL; 16061da177e4SLinus Torvalds 160723016969SChristoph Lameter area = get_vm_area_caller((count << PAGE_SHIFT), flags, 160823016969SChristoph Lameter __builtin_return_address(0)); 16091da177e4SLinus Torvalds if (!area) 16101da177e4SLinus Torvalds return NULL; 161123016969SChristoph Lameter 16121da177e4SLinus Torvalds if (map_vm_area(area, prot, &pages)) { 16131da177e4SLinus Torvalds vunmap(area->addr); 16141da177e4SLinus Torvalds return NULL; 16151da177e4SLinus Torvalds } 16161da177e4SLinus Torvalds 16171da177e4SLinus Torvalds return area->addr; 16181da177e4SLinus Torvalds } 16191da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 16201da177e4SLinus Torvalds 16212dca6999SDavid Miller static void *__vmalloc_node(unsigned long size, unsigned long align, 16222dca6999SDavid Miller gfp_t gfp_mask, pgprot_t prot, 16235e6cafc8SMarek Szyprowski int node, const void *caller); 1624e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 16255e6cafc8SMarek Szyprowski pgprot_t prot, int node, const void *caller) 16261da177e4SLinus Torvalds { 162722943ab1SDave Hansen const int order = 0; 16281da177e4SLinus Torvalds struct page **pages; 16291da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 1630976d6dfbSJan Beulich gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 16311da177e4SLinus Torvalds 16321da177e4SLinus Torvalds nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT; 16331da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 16341da177e4SLinus Torvalds 16351da177e4SLinus Torvalds area->nr_pages = nr_pages; 16361da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
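 * The nested __vmalloc_node() below is taken only when the
 * page-pointer array itself is bigger than a page, and the array
 * needed by that nested allocation shrinks by a factor of
 * PAGE_SIZE/sizeof(struct page *) at each level, so the recursion
 * bottoms out after a handful of levels.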
*/ 16378757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 1638976d6dfbSJan Beulich pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM, 163923016969SChristoph Lameter PAGE_KERNEL, node, caller); 16408757d5faSJan Kiszka area->flags |= VM_VPAGES; 1641286e1ea3SAndrew Morton } else { 1642976d6dfbSJan Beulich pages = kmalloc_node(array_size, nested_gfp, node); 1643286e1ea3SAndrew Morton } 16441da177e4SLinus Torvalds area->pages = pages; 164523016969SChristoph Lameter area->caller = caller; 16461da177e4SLinus Torvalds if (!area->pages) { 16471da177e4SLinus Torvalds remove_vm_area(area->addr); 16481da177e4SLinus Torvalds kfree(area); 16491da177e4SLinus Torvalds return NULL; 16501da177e4SLinus Torvalds } 16511da177e4SLinus Torvalds 16521da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1653bf53d6f8SChristoph Lameter struct page *page; 165422943ab1SDave Hansen gfp_t tmp_mask = gfp_mask | __GFP_NOWARN; 1655bf53d6f8SChristoph Lameter 1656930fc45aSChristoph Lameter if (node < 0) 165722943ab1SDave Hansen page = alloc_page(tmp_mask); 1658930fc45aSChristoph Lameter else 165922943ab1SDave Hansen page = alloc_pages_node(node, tmp_mask, order); 1660bf53d6f8SChristoph Lameter 1661bf53d6f8SChristoph Lameter if (unlikely(!page)) { 16621da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 16631da177e4SLinus Torvalds area->nr_pages = i; 16641da177e4SLinus Torvalds goto fail; 16651da177e4SLinus Torvalds } 1666bf53d6f8SChristoph Lameter area->pages[i] = page; 16671da177e4SLinus Torvalds } 16681da177e4SLinus Torvalds 16691da177e4SLinus Torvalds if (map_vm_area(area, prot, &pages)) 16701da177e4SLinus Torvalds goto fail; 16711da177e4SLinus Torvalds return area->addr; 16721da177e4SLinus Torvalds 16731da177e4SLinus Torvalds fail: 16743ee9a4f0SJoe Perches warn_alloc_failed(gfp_mask, order, 16753ee9a4f0SJoe Perches "vmalloc: allocation failure, allocated %ld of %ld bytes\n", 167622943ab1SDave Hansen (area->nr_pages*PAGE_SIZE), area->size); 16771da177e4SLinus Torvalds vfree(area->addr); 16781da177e4SLinus Torvalds return NULL; 16791da177e4SLinus Torvalds } 16801da177e4SLinus Torvalds 1681d0a21265SDavid Rientjes /** 1682d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 1683d0a21265SDavid Rientjes * @size: allocation size 1684d0a21265SDavid Rientjes * @align: desired alignment 1685d0a21265SDavid Rientjes * @start: vm area range start 1686d0a21265SDavid Rientjes * @end: vm area range end 1687d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 1688d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 168900ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 1690d0a21265SDavid Rientjes * @caller: caller's return address 1691d0a21265SDavid Rientjes * 1692d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 1693d0a21265SDavid Rientjes * allocator with @gfp_mask flags. Map them into contiguous 1694d0a21265SDavid Rientjes * kernel virtual space, using a pagetable protection of @prot. 
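 *
 * Example (an illustrative sketch; the MODULES_VADDR/MODULES_END
 * window is an assumption borrowed from module loading on some
 * architectures, not something this file defines):
 *
 *	void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
 *				       GFP_KERNEL, PAGE_KERNEL_EXEC,
 *				       NUMA_NO_NODE,
 *				       __builtin_return_address(0));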
1695d0a21265SDavid Rientjes */ 1696d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 1697d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 16985e6cafc8SMarek Szyprowski pgprot_t prot, int node, const void *caller) 1699930fc45aSChristoph Lameter { 1700d0a21265SDavid Rientjes struct vm_struct *area; 1701d0a21265SDavid Rientjes void *addr; 1702d0a21265SDavid Rientjes unsigned long real_size = size; 1703d0a21265SDavid Rientjes 1704d0a21265SDavid Rientjes size = PAGE_ALIGN(size); 1705d0a21265SDavid Rientjes if (!size || (size >> PAGE_SHIFT) > totalram_pages) 1706de7d2b56SJoe Perches goto fail; 1707d0a21265SDavid Rientjes 1708f5252e00SMitsuo Hayasaka area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST, 1709f5252e00SMitsuo Hayasaka start, end, node, gfp_mask, caller); 1710d0a21265SDavid Rientjes if (!area) 1711de7d2b56SJoe Perches goto fail; 1712d0a21265SDavid Rientjes 1713d0a21265SDavid Rientjes addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller); 17141368edf0SMel Gorman if (!addr) 17151368edf0SMel Gorman return NULL; 171689219d37SCatalin Marinas 171789219d37SCatalin Marinas /* 1718f5252e00SMitsuo Hayasaka * In this function, newly allocated vm_struct is not added 1719f5252e00SMitsuo Hayasaka * to vmlist at __get_vm_area_node(), so it is added here. 1720f5252e00SMitsuo Hayasaka */ 1721f5252e00SMitsuo Hayasaka insert_vmalloc_vmlist(area); 1722f5252e00SMitsuo Hayasaka 1723f5252e00SMitsuo Hayasaka /* 172489219d37SCatalin Marinas * A ref_count = 3 is needed because the vm_struct and vmap_area 172589219d37SCatalin Marinas * structures allocated in the __get_vm_area_node() function contain 172689219d37SCatalin Marinas * references to the virtual address of the vmalloc'ed block. 172789219d37SCatalin Marinas */ 1728d0a21265SDavid Rientjes kmemleak_alloc(addr, real_size, 3, gfp_mask); 172989219d37SCatalin Marinas 173089219d37SCatalin Marinas return addr; 1731de7d2b56SJoe Perches 1732de7d2b56SJoe Perches fail: 1733de7d2b56SJoe Perches warn_alloc_failed(gfp_mask, 0, 1734de7d2b56SJoe Perches "vmalloc: allocation failure: %lu bytes\n", 1735de7d2b56SJoe Perches real_size); 1736de7d2b56SJoe Perches return NULL; 1737930fc45aSChristoph Lameter } 1738930fc45aSChristoph Lameter 17391da177e4SLinus Torvalds /** 1740930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 17411da177e4SLinus Torvalds * @size: allocation size 17422dca6999SDavid Miller * @align: desired alignment 17431da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 17441da177e4SLinus Torvalds * @prot: protection mask for the allocated pages 174500ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 1746c85d194bSRandy Dunlap * @caller: caller's return address 17471da177e4SLinus Torvalds * 17481da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 17491da177e4SLinus Torvalds * allocator with @gfp_mask flags. Map them into contiguous 17501da177e4SLinus Torvalds * kernel virtual space, using a pagetable protection of @prot.
17511da177e4SLinus Torvalds */ 17522dca6999SDavid Miller static void *__vmalloc_node(unsigned long size, unsigned long align, 17532dca6999SDavid Miller gfp_t gfp_mask, pgprot_t prot, 17545e6cafc8SMarek Szyprowski int node, const void *caller) 17551da177e4SLinus Torvalds { 1756d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 1757d0a21265SDavid Rientjes gfp_mask, prot, node, caller); 17581da177e4SLinus Torvalds } 17591da177e4SLinus Torvalds 1760930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 1761930fc45aSChristoph Lameter { 176200ef2d2fSDavid Rientjes return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, 176323016969SChristoph Lameter __builtin_return_address(0)); 1764930fc45aSChristoph Lameter } 17651da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 17661da177e4SLinus Torvalds 1767e1ca7788SDave Young static inline void *__vmalloc_node_flags(unsigned long size, 1768e1ca7788SDave Young int node, gfp_t flags) 1769e1ca7788SDave Young { 1770e1ca7788SDave Young return __vmalloc_node(size, 1, flags, PAGE_KERNEL, 1771e1ca7788SDave Young node, __builtin_return_address(0)); 1772e1ca7788SDave Young } 1773e1ca7788SDave Young 17741da177e4SLinus Torvalds /** 17751da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 17761da177e4SLinus Torvalds * @size: allocation size 17771da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 17781da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 17791da177e4SLinus Torvalds * 1780c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 17811da177e4SLinus Torvalds * use __vmalloc() instead. 17821da177e4SLinus Torvalds */ 17831da177e4SLinus Torvalds void *vmalloc(unsigned long size) 17841da177e4SLinus Torvalds { 178500ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 178600ef2d2fSDavid Rientjes GFP_KERNEL | __GFP_HIGHMEM); 17871da177e4SLinus Torvalds } 17881da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 17891da177e4SLinus Torvalds 1790930fc45aSChristoph Lameter /** 1791e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 1792e1ca7788SDave Young * @size: allocation size 1793e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 1794e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 1795e1ca7788SDave Young * The memory allocated is set to zero. 1796e1ca7788SDave Young * 1797e1ca7788SDave Young * For tight control over page level allocator and protection flags 1798e1ca7788SDave Young * use __vmalloc() instead. 1799e1ca7788SDave Young */ 1800e1ca7788SDave Young void *vzalloc(unsigned long size) 1801e1ca7788SDave Young { 180200ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 1803e1ca7788SDave Young GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 1804e1ca7788SDave Young } 1805e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 1806e1ca7788SDave Young 1807e1ca7788SDave Young /** 1808ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 180983342314SNick Piggin * @size: allocation size 1810ead04089SRolf Eike Beer * 1811ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 1812ead04089SRolf Eike Beer * without leaking data. 
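 *
 * Example (an illustrative sketch of a hypothetical driver buffer that
 * is later handed to userspace with remap_vmalloc_range()):
 *
 *	buf = vmalloc_user(npages << PAGE_SHIFT);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	err = remap_vmalloc_range(vma, buf, 0);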
181383342314SNick Piggin */ 181483342314SNick Piggin void *vmalloc_user(unsigned long size) 181583342314SNick Piggin { 181683342314SNick Piggin struct vm_struct *area; 181783342314SNick Piggin void *ret; 181883342314SNick Piggin 18192dca6999SDavid Miller ret = __vmalloc_node(size, SHMLBA, 18202dca6999SDavid Miller GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 182100ef2d2fSDavid Rientjes PAGE_KERNEL, NUMA_NO_NODE, 182200ef2d2fSDavid Rientjes __builtin_return_address(0)); 18232b4ac44eSEric Dumazet if (ret) { 1824db64fe02SNick Piggin area = find_vm_area(ret); 182583342314SNick Piggin area->flags |= VM_USERMAP; 18262b4ac44eSEric Dumazet } 182783342314SNick Piggin return ret; 182883342314SNick Piggin } 182983342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 183083342314SNick Piggin 183183342314SNick Piggin /** 1832930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 1833930fc45aSChristoph Lameter * @size: allocation size 1834d44e0780SRandy Dunlap * @node: numa node 1835930fc45aSChristoph Lameter * 1836930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 1837930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 1838930fc45aSChristoph Lameter * 1839c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 1840930fc45aSChristoph Lameter * use __vmalloc() instead. 1841930fc45aSChristoph Lameter */ 1842930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 1843930fc45aSChristoph Lameter { 18442dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 184523016969SChristoph Lameter node, __builtin_return_address(0)); 1846930fc45aSChristoph Lameter } 1847930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 1848930fc45aSChristoph Lameter 1849e1ca7788SDave Young /** 1850e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 1851e1ca7788SDave Young * @size: allocation size 1852e1ca7788SDave Young * @node: numa node 1853e1ca7788SDave Young * 1854e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 1855e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 1856e1ca7788SDave Young * The memory allocated is set to zero. 1857e1ca7788SDave Young * 1858e1ca7788SDave Young * For tight control over page level allocator and protection flags 1859e1ca7788SDave Young * use __vmalloc_node() instead. 1860e1ca7788SDave Young */ 1861e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 1862e1ca7788SDave Young { 1863e1ca7788SDave Young return __vmalloc_node_flags(size, node, 1864e1ca7788SDave Young GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 1865e1ca7788SDave Young } 1866e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 1867e1ca7788SDave Young 18684dc3b16bSPavel Pisa #ifndef PAGE_KERNEL_EXEC 18694dc3b16bSPavel Pisa # define PAGE_KERNEL_EXEC PAGE_KERNEL 18704dc3b16bSPavel Pisa #endif 18714dc3b16bSPavel Pisa 18721da177e4SLinus Torvalds /** 18731da177e4SLinus Torvalds * vmalloc_exec - allocate virtually contiguous, executable memory 18741da177e4SLinus Torvalds * @size: allocation size 18751da177e4SLinus Torvalds * 18761da177e4SLinus Torvalds * Kernel-internal function to allocate enough pages to cover @size 18771da177e4SLinus Torvalds * from the page level allocator and map them into contiguous and 18781da177e4SLinus Torvalds * executable kernel virtual space.
18791da177e4SLinus Torvalds * 1880c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 18811da177e4SLinus Torvalds * use __vmalloc() instead. 18821da177e4SLinus Torvalds */ 18831da177e4SLinus Torvalds 18841da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size) 18851da177e4SLinus Torvalds { 18862dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, 188700ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 18881da177e4SLinus Torvalds } 18891da177e4SLinus Torvalds 18900d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 18917ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL 18920d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 18937ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL 18940d08e0d3SAndi Kleen #else 18950d08e0d3SAndi Kleen #define GFP_VMALLOC32 GFP_KERNEL 18960d08e0d3SAndi Kleen #endif 18970d08e0d3SAndi Kleen 18981da177e4SLinus Torvalds /** 18991da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 19001da177e4SLinus Torvalds * @size: allocation size 19011da177e4SLinus Torvalds * 19021da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 19031da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 19041da177e4SLinus Torvalds */ 19051da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 19061da177e4SLinus Torvalds { 19072dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, 190800ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 19091da177e4SLinus Torvalds } 19101da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 19111da177e4SLinus Torvalds 191283342314SNick Piggin /** 1913ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 191483342314SNick Piggin * @size: allocation size 1915ead04089SRolf Eike Beer * 1916ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 1917ead04089SRolf Eike Beer * mapped to userspace without leaking data. 191883342314SNick Piggin */ 191983342314SNick Piggin void *vmalloc_32_user(unsigned long size) 192083342314SNick Piggin { 192183342314SNick Piggin struct vm_struct *area; 192283342314SNick Piggin void *ret; 192383342314SNick Piggin 19242dca6999SDavid Miller ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 192500ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 19262b4ac44eSEric Dumazet if (ret) { 1927db64fe02SNick Piggin area = find_vm_area(ret); 192883342314SNick Piggin area->flags |= VM_USERMAP; 19292b4ac44eSEric Dumazet } 193083342314SNick Piggin return ret; 193183342314SNick Piggin } 193283342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 193383342314SNick Piggin 1934d0107eb0SKAMEZAWA Hiroyuki /* 1935d0107eb0SKAMEZAWA Hiroyuki * Small helper routine: copy contents to buf from addr. 1936d0107eb0SKAMEZAWA Hiroyuki * If the page is not present, fill with zeros.
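 * The copy proceeds page by page: offset is the position of addr
 * within its page, and length is clamped both to the rest of that
 * page and to the remaining count, so an unaligned addr only costs
 * a short first iteration.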
1937d0107eb0SKAMEZAWA Hiroyuki */ 1938d0107eb0SKAMEZAWA Hiroyuki 1939d0107eb0SKAMEZAWA Hiroyuki static int aligned_vread(char *buf, char *addr, unsigned long count) 1940d0107eb0SKAMEZAWA Hiroyuki { 1941d0107eb0SKAMEZAWA Hiroyuki struct page *p; 1942d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 1943d0107eb0SKAMEZAWA Hiroyuki 1944d0107eb0SKAMEZAWA Hiroyuki while (count) { 1945d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 1946d0107eb0SKAMEZAWA Hiroyuki 1947d0107eb0SKAMEZAWA Hiroyuki offset = (unsigned long)addr & ~PAGE_MASK; 1948d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 1949d0107eb0SKAMEZAWA Hiroyuki if (length > count) 1950d0107eb0SKAMEZAWA Hiroyuki length = count; 1951d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 1952d0107eb0SKAMEZAWA Hiroyuki /* 1953d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need 1954d0107eb0SKAMEZAWA Hiroyuki * a lock. But adding a lock here means that we need to add 1955d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calls for this _debug_ 1956d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 1957d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function. 1958d0107eb0SKAMEZAWA Hiroyuki */ 1959d0107eb0SKAMEZAWA Hiroyuki if (p) { 1960d0107eb0SKAMEZAWA Hiroyuki /* 1961d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's 1962d0107eb0SKAMEZAWA Hiroyuki * function description) 1963d0107eb0SKAMEZAWA Hiroyuki */ 19649b04c5feSCong Wang void *map = kmap_atomic(p); 1965d0107eb0SKAMEZAWA Hiroyuki memcpy(buf, map + offset, length); 19669b04c5feSCong Wang kunmap_atomic(map); 1967d0107eb0SKAMEZAWA Hiroyuki } else 1968d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, length); 1969d0107eb0SKAMEZAWA Hiroyuki 1970d0107eb0SKAMEZAWA Hiroyuki addr += length; 1971d0107eb0SKAMEZAWA Hiroyuki buf += length; 1972d0107eb0SKAMEZAWA Hiroyuki copied += length; 1973d0107eb0SKAMEZAWA Hiroyuki count -= length; 1974d0107eb0SKAMEZAWA Hiroyuki } 1975d0107eb0SKAMEZAWA Hiroyuki return copied; 1976d0107eb0SKAMEZAWA Hiroyuki } 1977d0107eb0SKAMEZAWA Hiroyuki 1978d0107eb0SKAMEZAWA Hiroyuki static int aligned_vwrite(char *buf, char *addr, unsigned long count) 1979d0107eb0SKAMEZAWA Hiroyuki { 1980d0107eb0SKAMEZAWA Hiroyuki struct page *p; 1981d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 1982d0107eb0SKAMEZAWA Hiroyuki 1983d0107eb0SKAMEZAWA Hiroyuki while (count) { 1984d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 1985d0107eb0SKAMEZAWA Hiroyuki 1986d0107eb0SKAMEZAWA Hiroyuki offset = (unsigned long)addr & ~PAGE_MASK; 1987d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 1988d0107eb0SKAMEZAWA Hiroyuki if (length > count) 1989d0107eb0SKAMEZAWA Hiroyuki length = count; 1990d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 1991d0107eb0SKAMEZAWA Hiroyuki /* 1992d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need 1993d0107eb0SKAMEZAWA Hiroyuki * a lock. But adding a lock here means that we need to add 1994d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calls for this _debug_ 1995d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 1996d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function.
1997d0107eb0SKAMEZAWA Hiroyuki */ 1998d0107eb0SKAMEZAWA Hiroyuki if (p) { 1999d0107eb0SKAMEZAWA Hiroyuki /* 2000d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's 2001d0107eb0SKAMEZAWA Hiroyuki * function description) 2002d0107eb0SKAMEZAWA Hiroyuki */ 20039b04c5feSCong Wang void *map = kmap_atomic(p); 2004d0107eb0SKAMEZAWA Hiroyuki memcpy(map + offset, buf, length); 20059b04c5feSCong Wang kunmap_atomic(map); 2006d0107eb0SKAMEZAWA Hiroyuki } 2007d0107eb0SKAMEZAWA Hiroyuki addr += length; 2008d0107eb0SKAMEZAWA Hiroyuki buf += length; 2009d0107eb0SKAMEZAWA Hiroyuki copied += length; 2010d0107eb0SKAMEZAWA Hiroyuki count -= length; 2011d0107eb0SKAMEZAWA Hiroyuki } 2012d0107eb0SKAMEZAWA Hiroyuki return copied; 2013d0107eb0SKAMEZAWA Hiroyuki } 2014d0107eb0SKAMEZAWA Hiroyuki 2015d0107eb0SKAMEZAWA Hiroyuki /** 2016d0107eb0SKAMEZAWA Hiroyuki * vread() - read vmalloc area in a safe way. 2017d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for reading data 2018d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 2019d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read. 2020d0107eb0SKAMEZAWA Hiroyuki * 2021d0107eb0SKAMEZAWA Hiroyuki * Returns the number of bytes by which addr and buf should be increased 2022d0107eb0SKAMEZAWA Hiroyuki * (the same number as @count). Returns 0 if [addr...addr+count) doesn't 2023d0107eb0SKAMEZAWA Hiroyuki * include any intersection with a live vmalloc area. 2024d0107eb0SKAMEZAWA Hiroyuki * 2025d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 2026d0107eb0SKAMEZAWA Hiroyuki * copies data from that area to a given buffer. If the given memory range 2027d0107eb0SKAMEZAWA Hiroyuki * of [addr...addr+count) includes some valid address, data is copied to 2028d0107eb0SKAMEZAWA Hiroyuki * proper area of @buf. If there are memory holes, they'll be zero-filled. 2029d0107eb0SKAMEZAWA Hiroyuki * IOREMAP area is treated as memory hole and no copy is done. 2030d0107eb0SKAMEZAWA Hiroyuki * 2031d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a live 2032a8e5202dSCong Wang * vm_struct area, returns 0. @buf should be a kernel buffer. 2033d0107eb0SKAMEZAWA Hiroyuki * 2034d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vread() is never necessary because the caller 2035d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy(). 2036d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access vmalloc area without 2037d0107eb0SKAMEZAWA Hiroyuki * any information, such as /dev/kmem.
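 *
 * Example (an illustrative sketch of a /dev/kmem-style reader; the
 * kernel-side bounce buffer is an assumption of this sketch):
 *
 *	char *kbuf = kmalloc(count, GFP_KERNEL);
 *	if (kbuf && vread(kbuf, (char *)addr, count))
 *		...hand kbuf to the consumer, e.g. via copy_to_user()...
 *	kfree(kbuf);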
2038d0107eb0SKAMEZAWA Hiroyuki * 2039d0107eb0SKAMEZAWA Hiroyuki */ 2040d0107eb0SKAMEZAWA Hiroyuki 20411da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count) 20421da177e4SLinus Torvalds { 20431da177e4SLinus Torvalds struct vm_struct *tmp; 20441da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 2045d0107eb0SKAMEZAWA Hiroyuki unsigned long buflen = count; 20461da177e4SLinus Torvalds unsigned long n; 20471da177e4SLinus Torvalds 20481da177e4SLinus Torvalds /* Don't allow overflow */ 20491da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 20501da177e4SLinus Torvalds count = -(unsigned long) addr; 20511da177e4SLinus Torvalds 20521da177e4SLinus Torvalds read_lock(&vmlist_lock); 2053d0107eb0SKAMEZAWA Hiroyuki for (tmp = vmlist; count && tmp; tmp = tmp->next) { 20541da177e4SLinus Torvalds vaddr = (char *) tmp->addr; 20551da177e4SLinus Torvalds if (addr >= vaddr + tmp->size - PAGE_SIZE) 20561da177e4SLinus Torvalds continue; 20571da177e4SLinus Torvalds while (addr < vaddr) { 20581da177e4SLinus Torvalds if (count == 0) 20591da177e4SLinus Torvalds goto finished; 20601da177e4SLinus Torvalds *buf = '\0'; 20611da177e4SLinus Torvalds buf++; 20621da177e4SLinus Torvalds addr++; 20631da177e4SLinus Torvalds count--; 20641da177e4SLinus Torvalds } 20651da177e4SLinus Torvalds n = vaddr + tmp->size - PAGE_SIZE - addr; 2066d0107eb0SKAMEZAWA Hiroyuki if (n > count) 2067d0107eb0SKAMEZAWA Hiroyuki n = count; 2068d0107eb0SKAMEZAWA Hiroyuki if (!(tmp->flags & VM_IOREMAP)) 2069d0107eb0SKAMEZAWA Hiroyuki aligned_vread(buf, addr, n); 2070d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */ 2071d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, n); 2072d0107eb0SKAMEZAWA Hiroyuki buf += n; 2073d0107eb0SKAMEZAWA Hiroyuki addr += n; 2074d0107eb0SKAMEZAWA Hiroyuki count -= n; 20751da177e4SLinus Torvalds } 20761da177e4SLinus Torvalds finished: 20771da177e4SLinus Torvalds read_unlock(&vmlist_lock); 2078d0107eb0SKAMEZAWA Hiroyuki 2079d0107eb0SKAMEZAWA Hiroyuki if (buf == buf_start) 2080d0107eb0SKAMEZAWA Hiroyuki return 0; 2081d0107eb0SKAMEZAWA Hiroyuki /* zero-fill memory holes */ 2082d0107eb0SKAMEZAWA Hiroyuki if (buf != buf_start + buflen) 2083d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, buflen - (buf - buf_start)); 2084d0107eb0SKAMEZAWA Hiroyuki 2085d0107eb0SKAMEZAWA Hiroyuki return buflen; 20861da177e4SLinus Torvalds } 20871da177e4SLinus Torvalds 2088d0107eb0SKAMEZAWA Hiroyuki /** 2089d0107eb0SKAMEZAWA Hiroyuki * vwrite() - write vmalloc area in a safe way. 2090d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for source data 2091d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 2092d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be written. 2093d0107eb0SKAMEZAWA Hiroyuki * 2094d0107eb0SKAMEZAWA Hiroyuki * Returns the number of bytes by which addr and buf should be increased 2095d0107eb0SKAMEZAWA Hiroyuki * (the same number as @count). 2096d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a valid 2097d0107eb0SKAMEZAWA Hiroyuki * vmalloc area, returns 0. 2098d0107eb0SKAMEZAWA Hiroyuki * 2099d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 2100d0107eb0SKAMEZAWA Hiroyuki * copies data from a buffer to the given addr. If the specified range of 2101d0107eb0SKAMEZAWA Hiroyuki * [addr...addr+count) includes some valid address, data is copied from 2102d0107eb0SKAMEZAWA Hiroyuki * proper area of @buf. If there are memory holes, nothing is copied to them.
2103d0107eb0SKAMEZAWA Hiroyuki * IOREMAP area is treated as memory hole and no copy is done. 2104d0107eb0SKAMEZAWA Hiroyuki * 2105d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a live 2106a8e5202dSCong Wang * vm_struct area, returns 0. @buf should be a kernel buffer. 2107d0107eb0SKAMEZAWA Hiroyuki * 2108d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vwrite() is never necessary because the caller 2109d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy(). 2110d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access vmalloc area without 2111d0107eb0SKAMEZAWA Hiroyuki * any information, such as /dev/kmem. 2112d0107eb0SKAMEZAWA Hiroyuki */ 2113d0107eb0SKAMEZAWA Hiroyuki 21141da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count) 21151da177e4SLinus Torvalds { 21161da177e4SLinus Torvalds struct vm_struct *tmp; 2117d0107eb0SKAMEZAWA Hiroyuki char *vaddr; 2118d0107eb0SKAMEZAWA Hiroyuki unsigned long n, buflen; 2119d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 21201da177e4SLinus Torvalds 21211da177e4SLinus Torvalds /* Don't allow overflow */ 21221da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 21231da177e4SLinus Torvalds count = -(unsigned long) addr; 2124d0107eb0SKAMEZAWA Hiroyuki buflen = count; 21251da177e4SLinus Torvalds 21261da177e4SLinus Torvalds read_lock(&vmlist_lock); 2127d0107eb0SKAMEZAWA Hiroyuki for (tmp = vmlist; count && tmp; tmp = tmp->next) { 21281da177e4SLinus Torvalds vaddr = (char *) tmp->addr; 21291da177e4SLinus Torvalds if (addr >= vaddr + tmp->size - PAGE_SIZE) 21301da177e4SLinus Torvalds continue; 21311da177e4SLinus Torvalds while (addr < vaddr) { 21321da177e4SLinus Torvalds if (count == 0) 21331da177e4SLinus Torvalds goto finished; 21341da177e4SLinus Torvalds buf++; 21351da177e4SLinus Torvalds addr++; 21361da177e4SLinus Torvalds count--; 21371da177e4SLinus Torvalds } 21381da177e4SLinus Torvalds n = vaddr + tmp->size - PAGE_SIZE - addr; 2139d0107eb0SKAMEZAWA Hiroyuki if (n > count) 2140d0107eb0SKAMEZAWA Hiroyuki n = count; 2141d0107eb0SKAMEZAWA Hiroyuki if (!(tmp->flags & VM_IOREMAP)) { 2142d0107eb0SKAMEZAWA Hiroyuki aligned_vwrite(buf, addr, n); 2143d0107eb0SKAMEZAWA Hiroyuki copied++; 2144d0107eb0SKAMEZAWA Hiroyuki } 2145d0107eb0SKAMEZAWA Hiroyuki buf += n; 2146d0107eb0SKAMEZAWA Hiroyuki addr += n; 2147d0107eb0SKAMEZAWA Hiroyuki count -= n; 21481da177e4SLinus Torvalds } 21491da177e4SLinus Torvalds finished: 21501da177e4SLinus Torvalds read_unlock(&vmlist_lock); 2151d0107eb0SKAMEZAWA Hiroyuki if (!copied) 2152d0107eb0SKAMEZAWA Hiroyuki return 0; 2153d0107eb0SKAMEZAWA Hiroyuki return buflen; 21541da177e4SLinus Torvalds } 215583342314SNick Piggin 215683342314SNick Piggin /** 215783342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 215883342314SNick Piggin * @vma: vma to cover (map full range of vma) 215983342314SNick Piggin * @addr: vmalloc memory 216083342314SNick Piggin * @pgoff: number of pages into addr before first page to map 21617682486bSRandy Dunlap * 21627682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 216383342314SNick Piggin * 216483342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 216583342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 216683342314SNick Piggin * those criteria aren't met. 216783342314SNick Piggin * 216872fd4a35SRobert P. J.
Day * Similar to remap_pfn_range() (see mm/memory.c) 216983342314SNick Piggin */ 217083342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 217183342314SNick Piggin unsigned long pgoff) 217283342314SNick Piggin { 217383342314SNick Piggin struct vm_struct *area; 217483342314SNick Piggin unsigned long uaddr = vma->vm_start; 217583342314SNick Piggin unsigned long usize = vma->vm_end - vma->vm_start; 217683342314SNick Piggin 217783342314SNick Piggin if ((PAGE_SIZE-1) & (unsigned long)addr) 217883342314SNick Piggin return -EINVAL; 217983342314SNick Piggin 2180db64fe02SNick Piggin area = find_vm_area(addr); 218183342314SNick Piggin if (!area) 2182db64fe02SNick Piggin return -EINVAL; 218383342314SNick Piggin 218483342314SNick Piggin if (!(area->flags & VM_USERMAP)) 2185db64fe02SNick Piggin return -EINVAL; 218683342314SNick Piggin 218783342314SNick Piggin if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE) 2188db64fe02SNick Piggin return -EINVAL; 218983342314SNick Piggin 219083342314SNick Piggin addr += pgoff << PAGE_SHIFT; 219183342314SNick Piggin do { 219283342314SNick Piggin struct page *page = vmalloc_to_page(addr); 2193db64fe02SNick Piggin int ret; 2194db64fe02SNick Piggin 219583342314SNick Piggin ret = vm_insert_page(vma, uaddr, page); 219683342314SNick Piggin if (ret) 219783342314SNick Piggin return ret; 219883342314SNick Piggin 219983342314SNick Piggin uaddr += PAGE_SIZE; 220083342314SNick Piggin addr += PAGE_SIZE; 220183342314SNick Piggin usize -= PAGE_SIZE; 220283342314SNick Piggin } while (usize > 0); 220383342314SNick Piggin 2204314e51b9SKonstantin Khlebnikov vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 220583342314SNick Piggin 2206db64fe02SNick Piggin return 0; 220783342314SNick Piggin } 220883342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 220983342314SNick Piggin 22101eeb66a1SChristoph Hellwig /* 22111eeb66a1SChristoph Hellwig * Implement a stub for vmalloc_sync_all() if the architecture chose not to 22121eeb66a1SChristoph Hellwig * have one. 22131eeb66a1SChristoph Hellwig */ 22141eeb66a1SChristoph Hellwig void __attribute__((weak)) vmalloc_sync_all(void) 22151eeb66a1SChristoph Hellwig { 22161eeb66a1SChristoph Hellwig } 22175f4352fbSJeremy Fitzhardinge 22185f4352fbSJeremy Fitzhardinge 22192f569afdSMartin Schwidefsky static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 22205f4352fbSJeremy Fitzhardinge { 2221cd12909cSDavid Vrabel pte_t ***p = data; 2222cd12909cSDavid Vrabel 2223cd12909cSDavid Vrabel if (p) { 2224cd12909cSDavid Vrabel *(*p) = pte; 2225cd12909cSDavid Vrabel (*p)++; 2226cd12909cSDavid Vrabel } 22275f4352fbSJeremy Fitzhardinge return 0; 22285f4352fbSJeremy Fitzhardinge } 22295f4352fbSJeremy Fitzhardinge 22305f4352fbSJeremy Fitzhardinge /** 22315f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 22325f4352fbSJeremy Fitzhardinge * @size: size of the area 2233cd12909cSDavid Vrabel * @ptes: returns the PTEs for the address space 22347682486bSRandy Dunlap * 22357682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 22365f4352fbSJeremy Fitzhardinge * 22375f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 22385f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 2239cd12909cSDavid Vrabel * are created. 2240cd12909cSDavid Vrabel * 2241cd12909cSDavid Vrabel * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 2242cd12909cSDavid Vrabel * allocated for the VM area are returned. 
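 *
 * Example (an illustrative sketch in the style of a paravirtualized
 * guest that wants a kernel virtual page it can point at foreign
 * memory by rewriting the pte):
 *
 *	pte_t *pte;
 *	struct vm_struct *area = alloc_vm_area(PAGE_SIZE, &pte);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	...install the desired entry through *pte...
 *	free_vm_area(area);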
22435f4352fbSJeremy Fitzhardinge */ 2244cd12909cSDavid Vrabel struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) 22455f4352fbSJeremy Fitzhardinge { 22465f4352fbSJeremy Fitzhardinge struct vm_struct *area; 22475f4352fbSJeremy Fitzhardinge 224823016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 224923016969SChristoph Lameter __builtin_return_address(0)); 22505f4352fbSJeremy Fitzhardinge if (area == NULL) 22515f4352fbSJeremy Fitzhardinge return NULL; 22525f4352fbSJeremy Fitzhardinge 22535f4352fbSJeremy Fitzhardinge /* 22545f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 22555f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm. 22565f4352fbSJeremy Fitzhardinge */ 22575f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 2258cd12909cSDavid Vrabel size, f, ptes ? &ptes : NULL)) { 22595f4352fbSJeremy Fitzhardinge free_vm_area(area); 22605f4352fbSJeremy Fitzhardinge return NULL; 22615f4352fbSJeremy Fitzhardinge } 22625f4352fbSJeremy Fitzhardinge 22635f4352fbSJeremy Fitzhardinge return area; 22645f4352fbSJeremy Fitzhardinge } 22655f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 22665f4352fbSJeremy Fitzhardinge 22675f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 22685f4352fbSJeremy Fitzhardinge { 22695f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 22705f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 22715f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 22725f4352fbSJeremy Fitzhardinge kfree(area); 22735f4352fbSJeremy Fitzhardinge } 22745f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 2275a10aa579SChristoph Lameter 22764f8b02b4STejun Heo #ifdef CONFIG_SMP 2277ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 2278ca23e405STejun Heo { 2279ca23e405STejun Heo return n ? rb_entry(n, struct vmap_area, rb_node) : NULL; 2280ca23e405STejun Heo } 2281ca23e405STejun Heo 2282ca23e405STejun Heo /** 2283ca23e405STejun Heo * pvm_find_next_prev - find the next and prev vmap_area surrounding @end 2284ca23e405STejun Heo * @end: target address 2285ca23e405STejun Heo * @pnext: out arg for the next vmap_area 2286ca23e405STejun Heo * @pprev: out arg for the previous vmap_area 2287ca23e405STejun Heo * 2288ca23e405STejun Heo * Returns: %true if either or both of next and prev are found, 2289ca23e405STejun Heo * %false if no vmap_area exists 2290ca23e405STejun Heo * 2291ca23e405STejun Heo * Find the vmap_areas whose end addresses enclose @end, i.e. if not 2292ca23e405STejun Heo * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
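 *
 * For example, with existing areas ending at 8M, 16M and 24M, @end ==
 * 12M yields *pprev ending at 8M and *pnext ending at 16M, while @end
 * == 16M yields *pprev ending at 16M and *pnext ending at 24M.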
2293ca23e405STejun Heo */ 2294ca23e405STejun Heo static bool pvm_find_next_prev(unsigned long end, 2295ca23e405STejun Heo struct vmap_area **pnext, 2296ca23e405STejun Heo struct vmap_area **pprev) 2297ca23e405STejun Heo { 2298ca23e405STejun Heo struct rb_node *n = vmap_area_root.rb_node; 2299ca23e405STejun Heo struct vmap_area *va = NULL; 2300ca23e405STejun Heo 2301ca23e405STejun Heo while (n) { 2302ca23e405STejun Heo va = rb_entry(n, struct vmap_area, rb_node); 2303ca23e405STejun Heo if (end < va->va_end) 2304ca23e405STejun Heo n = n->rb_left; 2305ca23e405STejun Heo else if (end > va->va_end) 2306ca23e405STejun Heo n = n->rb_right; 2307ca23e405STejun Heo else 2308ca23e405STejun Heo break; 2309ca23e405STejun Heo } 2310ca23e405STejun Heo 2311ca23e405STejun Heo if (!va) 2312ca23e405STejun Heo return false; 2313ca23e405STejun Heo 2314ca23e405STejun Heo if (va->va_end > end) { 2315ca23e405STejun Heo *pnext = va; 2316ca23e405STejun Heo *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2317ca23e405STejun Heo } else { 2318ca23e405STejun Heo *pprev = va; 2319ca23e405STejun Heo *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); 2320ca23e405STejun Heo } 2321ca23e405STejun Heo return true; 2322ca23e405STejun Heo } 2323ca23e405STejun Heo 2324ca23e405STejun Heo /** 2325ca23e405STejun Heo * pvm_determine_end - find the highest aligned address between two vmap_areas 2326ca23e405STejun Heo * @pnext: in/out arg for the next vmap_area 2327ca23e405STejun Heo * @pprev: in/out arg for the previous vmap_area 2328ca23e405STejun Heo * @align: alignment 2329ca23e405STejun Heo * 2330ca23e405STejun Heo * Returns: determined end address 2331ca23e405STejun Heo * 2332ca23e405STejun Heo * Find the highest aligned address between *@pnext and *@pprev below 2333ca23e405STejun Heo * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned 2334ca23e405STejun Heo * down address is between the end addresses of the two vmap_areas. 2335ca23e405STejun Heo * 2336ca23e405STejun Heo * Please note that the address returned by this function may fall 2337ca23e405STejun Heo * inside *@pnext vmap_area. The caller is responsible for checking 2338ca23e405STejun Heo * that. 
2324ca23e405STejun Heo /**
2325ca23e405STejun Heo  * pvm_determine_end - find the highest aligned address between two vmap_areas
2326ca23e405STejun Heo  * @pnext: in/out arg for the next vmap_area
2327ca23e405STejun Heo  * @pprev: in/out arg for the previous vmap_area
2328ca23e405STejun Heo  * @align: alignment
2329ca23e405STejun Heo  *
2330ca23e405STejun Heo  * Returns: determined end address
2331ca23e405STejun Heo  *
2332ca23e405STejun Heo  * Find the highest aligned address between *@pnext and *@pprev below
2333ca23e405STejun Heo  * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned-
2334ca23e405STejun Heo  * down address is between the end addresses of the two vmap_areas.
2335ca23e405STejun Heo  *
2336ca23e405STejun Heo  * Please note that the address returned by this function may fall
2337ca23e405STejun Heo  * inside the *@pnext vmap_area. The caller is responsible for checking
2338ca23e405STejun Heo  * that.
2339ca23e405STejun Heo  */
2340ca23e405STejun Heo static unsigned long pvm_determine_end(struct vmap_area **pnext,
2341ca23e405STejun Heo 				       struct vmap_area **pprev,
2342ca23e405STejun Heo 				       unsigned long align)
2343ca23e405STejun Heo {
2344ca23e405STejun Heo 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2345ca23e405STejun Heo 	unsigned long addr;
2346ca23e405STejun Heo 
2347ca23e405STejun Heo 	if (*pnext)
2348ca23e405STejun Heo 		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2349ca23e405STejun Heo 	else
2350ca23e405STejun Heo 		addr = vmalloc_end;
2351ca23e405STejun Heo 
2352ca23e405STejun Heo 	while (*pprev && (*pprev)->va_end > addr) {
2353ca23e405STejun Heo 		*pnext = *pprev;
2354ca23e405STejun Heo 		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2355ca23e405STejun Heo 	}
2356ca23e405STejun Heo 
2357ca23e405STejun Heo 	return addr;
2358ca23e405STejun Heo }
2359ca23e405STejun Heo 
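/*
 * Editor's illustration with hypothetical values: for align == 0x10000,
 * *pnext spanning [0x128000, 0x130000) and *pprev ending at 0x100000,
 * the function aligns (*pnext)->va_start down to 0x120000 and returns
 * it; since (*pprev)->va_end (0x100000) is below that address, no
 * adjustment of the pair is needed. As the comment above warns, the
 * caller must still check the returned address for collisions.
 */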
2360ca23e405STejun Heo /**
2361ca23e405STejun Heo  * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
2362ca23e405STejun Heo  * @offsets: array containing offset of each area
2363ca23e405STejun Heo  * @sizes: array containing size of each area
2364ca23e405STejun Heo  * @nr_vms: the number of areas to allocate
2365ca23e405STejun Heo  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
2366ca23e405STejun Heo  *
2367ca23e405STejun Heo  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
2368ca23e405STejun Heo  * vm_structs on success, %NULL on failure
2369ca23e405STejun Heo  *
2370ca23e405STejun Heo  * The percpu allocator wants to use congruent vm areas so that it can
2371ca23e405STejun Heo  * maintain the offsets among percpu areas. This function allocates
2372ec3f64fcSDavid Rientjes  * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
2373ec3f64fcSDavid Rientjes  * be scattered pretty far apart, with the distance between two areas
2374ec3f64fcSDavid Rientjes  * easily reaching gigabytes. To avoid interacting with regular
2375ec3f64fcSDavid Rientjes  * vmallocs, these areas are allocated from the top.
2376ca23e405STejun Heo  *
2377ca23e405STejun Heo  * Despite its complicated look, this allocator is rather simple. It
2378ca23e405STejun Heo  * does everything top-down and scans areas from the end looking for a
2379ca23e405STejun Heo  * matching slot. While scanning, if any of the areas overlaps with an
2380ca23e405STejun Heo  * existing vmap_area, the base address is pulled down to fit the
2381ca23e405STejun Heo  * area. Scanning is repeated until all the areas fit, and then all
2382ca23e405STejun Heo  * necessary data structures are inserted and the result is returned.
2383ca23e405STejun Heo  */
2384ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2385ca23e405STejun Heo 				     const size_t *sizes, int nr_vms,
2386ec3f64fcSDavid Rientjes 				     size_t align)
2387ca23e405STejun Heo {
2388ca23e405STejun Heo 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2389ca23e405STejun Heo 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2390ca23e405STejun Heo 	struct vmap_area **vas, *prev, *next;
2391ca23e405STejun Heo 	struct vm_struct **vms;
2392ca23e405STejun Heo 	int area, area2, last_area, term_area;
2393ca23e405STejun Heo 	unsigned long base, start, end, last_end;
2394ca23e405STejun Heo 	bool purged = false;
2395ca23e405STejun Heo 
2396ca23e405STejun Heo 	/* verify parameters and allocate data structures */
2397ca23e405STejun Heo 	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
2398ca23e405STejun Heo 	for (last_area = 0, area = 0; area < nr_vms; area++) {
2399ca23e405STejun Heo 		start = offsets[area];
2400ca23e405STejun Heo 		end = start + sizes[area];
2401ca23e405STejun Heo 
2402ca23e405STejun Heo 		/* is everything aligned properly? */
2403ca23e405STejun Heo 		BUG_ON(!IS_ALIGNED(offsets[area], align));
2404ca23e405STejun Heo 		BUG_ON(!IS_ALIGNED(sizes[area], align));
2405ca23e405STejun Heo 
2406ca23e405STejun Heo 		/* detect the area with the highest address */
2407ca23e405STejun Heo 		if (start > offsets[last_area])
2408ca23e405STejun Heo 			last_area = area;
2409ca23e405STejun Heo 
2410ca23e405STejun Heo 		for (area2 = 0; area2 < nr_vms; area2++) {
2411ca23e405STejun Heo 			unsigned long start2 = offsets[area2];
2412ca23e405STejun Heo 			unsigned long end2 = start2 + sizes[area2];
2413ca23e405STejun Heo 
2414ca23e405STejun Heo 			if (area2 == area)
2415ca23e405STejun Heo 				continue;
2416ca23e405STejun Heo 
2417ca23e405STejun Heo 			BUG_ON(start2 >= start && start2 < end);
2418ca23e405STejun Heo 			BUG_ON(end2 <= end && end2 > start);
2419ca23e405STejun Heo 		}
2420ca23e405STejun Heo 	}
2421ca23e405STejun Heo 	last_end = offsets[last_area] + sizes[last_area];
2422ca23e405STejun Heo 
2423ca23e405STejun Heo 	if (vmalloc_end - vmalloc_start < last_end) {
2424ca23e405STejun Heo 		WARN_ON(true);
2425ca23e405STejun Heo 		return NULL;
2426ca23e405STejun Heo 	}
2427ca23e405STejun Heo 
24284d67d860SThomas Meyer 	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
24294d67d860SThomas Meyer 	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
2430ca23e405STejun Heo 	if (!vas || !vms)
2431f1db7afdSKautuk Consul 		goto err_free2;
2432ca23e405STejun Heo 
2433ca23e405STejun Heo 	for (area = 0; area < nr_vms; area++) {
2434ec3f64fcSDavid Rientjes 		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
2435ec3f64fcSDavid Rientjes 		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
2436ca23e405STejun Heo 		if (!vas[area] || !vms[area])
2437ca23e405STejun Heo 			goto err_free;
2438ca23e405STejun Heo 	}
2439ca23e405STejun Heo retry:
2440ca23e405STejun Heo 	spin_lock(&vmap_area_lock);
2441ca23e405STejun Heo 
2442ca23e405STejun Heo 	/* start scanning - we scan from the top, begin with the last area */
2443ca23e405STejun Heo 	area = term_area = last_area;
2444ca23e405STejun Heo 	start = offsets[area];
2445ca23e405STejun Heo 	end = start + sizes[area];
2446ca23e405STejun Heo 
2447ca23e405STejun Heo 	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2448ca23e405STejun Heo 		base = vmalloc_end - last_end;
2449ca23e405STejun Heo 		goto found;
2450ca23e405STejun Heo 	}
2451ca23e405STejun Heo 	base = pvm_determine_end(&next, &prev, align) - end;
2452ca23e405STejun Heo 
2453ca23e405STejun Heo 	while (true) {
2454ca23e405STejun Heo 		BUG_ON(next && next->va_end <= base + end);
2455ca23e405STejun Heo 		BUG_ON(prev && prev->va_end > base + end);
2456ca23e405STejun Heo 
2457ca23e405STejun Heo 		/*
2458ca23e405STejun Heo 		 * base might have underflowed, add last_end before
2459ca23e405STejun Heo 		 * comparing.
2460ca23e405STejun Heo 		 */
2461ca23e405STejun Heo 		if (base + last_end < vmalloc_start + last_end) {
2462ca23e405STejun Heo 			spin_unlock(&vmap_area_lock);
2463ca23e405STejun Heo 			if (!purged) {
2464ca23e405STejun Heo 				purge_vmap_area_lazy();
2465ca23e405STejun Heo 				purged = true;
2466ca23e405STejun Heo 				goto retry;
2467ca23e405STejun Heo 			}
2468ca23e405STejun Heo 			goto err_free;
2469ca23e405STejun Heo 		}
2470ca23e405STejun Heo 
2471ca23e405STejun Heo 		/*
2472ca23e405STejun Heo 		 * If next overlaps, move base downwards so that it's
2473ca23e405STejun Heo 		 * right below next and then recheck.
2474ca23e405STejun Heo 		 */
2475ca23e405STejun Heo 		if (next && next->va_start < base + end) {
2476ca23e405STejun Heo 			base = pvm_determine_end(&next, &prev, align) - end;
2477ca23e405STejun Heo 			term_area = area;
2478ca23e405STejun Heo 			continue;
2479ca23e405STejun Heo 		}
2480ca23e405STejun Heo 
2481ca23e405STejun Heo 		/*
2482ca23e405STejun Heo 		 * If prev overlaps, shift down next and prev and move
2483ca23e405STejun Heo 		 * base so that it's right below new next and then
2484ca23e405STejun Heo 		 * recheck.
2485ca23e405STejun Heo 		 */
2486ca23e405STejun Heo 		if (prev && prev->va_end > base + start) {
2487ca23e405STejun Heo 			next = prev;
2488ca23e405STejun Heo 			prev = node_to_va(rb_prev(&next->rb_node));
2489ca23e405STejun Heo 			base = pvm_determine_end(&next, &prev, align) - end;
2490ca23e405STejun Heo 			term_area = area;
2491ca23e405STejun Heo 			continue;
2492ca23e405STejun Heo 		}
2493ca23e405STejun Heo 
2494ca23e405STejun Heo 		/*
2495ca23e405STejun Heo 		 * This area fits, move on to the previous one. If
2496ca23e405STejun Heo 		 * the previous one is the terminal one, we're done.
2497ca23e405STejun Heo 		 */
2498ca23e405STejun Heo 		area = (area + nr_vms - 1) % nr_vms;
2499ca23e405STejun Heo 		if (area == term_area)
2500ca23e405STejun Heo 			break;
2501ca23e405STejun Heo 		start = offsets[area];
2502ca23e405STejun Heo 		end = start + sizes[area];
2503ca23e405STejun Heo 		pvm_find_next_prev(base + end, &next, &prev);
2504ca23e405STejun Heo 	}
2505ca23e405STejun Heo found:
2506ca23e405STejun Heo 	/* we've found a fitting base, insert all va's */
2507ca23e405STejun Heo 	for (area = 0; area < nr_vms; area++) {
2508ca23e405STejun Heo 		struct vmap_area *va = vas[area];
2509ca23e405STejun Heo 
2510ca23e405STejun Heo 		va->va_start = base + offsets[area];
2511ca23e405STejun Heo 		va->va_end = va->va_start + sizes[area];
2512ca23e405STejun Heo 		__insert_vmap_area(va);
2513ca23e405STejun Heo 	}
2514ca23e405STejun Heo 
2515ca23e405STejun Heo 	vmap_area_pcpu_hole = base + offsets[last_area];
2516ca23e405STejun Heo 
2517ca23e405STejun Heo 	spin_unlock(&vmap_area_lock);
2518ca23e405STejun Heo 
2519ca23e405STejun Heo 	/* insert all vm's */
2520ca23e405STejun Heo 	for (area = 0; area < nr_vms; area++)
2521ca23e405STejun Heo 		insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2522ca23e405STejun Heo 				  pcpu_get_vm_areas);
2523ca23e405STejun Heo 
2524ca23e405STejun Heo 	kfree(vas);
2525ca23e405STejun Heo 	return vms;
2526ca23e405STejun Heo 
2527ca23e405STejun Heo err_free:
2528ca23e405STejun Heo 	for (area = 0; area < nr_vms; area++) {
2529ca23e405STejun Heo 		kfree(vas[area]);
2530ca23e405STejun Heo 		kfree(vms[area]);
2531ca23e405STejun Heo 	}
2532f1db7afdSKautuk Consul err_free2:
2533ca23e405STejun Heo 	kfree(vas);
2534ca23e405STejun Heo 	kfree(vms);
2535ca23e405STejun Heo 	return NULL;
2536ca23e405STejun Heo }
2537ca23e405STejun Heo 
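/*
 * Editor's sketch (hypothetical caller, not taken from the percpu
 * code): requesting two congruent 64KB areas whose starts are 128KB
 * apart. The returned vm_structs keep exactly these offsets from a
 * common base, which is what lets the percpu allocator translate one
 * unit's address into another's by pure arithmetic.
 */
#if 0	/* illustrative only */
static struct vm_struct **example_percpu_vm_areas(void)
{
	static const unsigned long offsets[] = { 0, SZ_128K };
	static const size_t sizes[] = { SZ_64K, SZ_64K };

	/* offsets and sizes are both aligned to the requested 64KB */
	return pcpu_get_vm_areas(offsets, sizes, 2, SZ_64K);
}
#endif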
2538ca23e405STejun Heo /**
2539ca23e405STejun Heo  * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
2540ca23e405STejun Heo  * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
2541ca23e405STejun Heo  * @nr_vms: the number of allocated areas
2542ca23e405STejun Heo  *
2543ca23e405STejun Heo  * Free vm_structs and the array allocated by pcpu_get_vm_areas().
2544ca23e405STejun Heo  */
2545ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2546ca23e405STejun Heo {
2547ca23e405STejun Heo 	int i;
2548ca23e405STejun Heo 
2549ca23e405STejun Heo 	for (i = 0; i < nr_vms; i++)
2550ca23e405STejun Heo 		free_vm_area(vms[i]);
2551ca23e405STejun Heo 	kfree(vms);
2552ca23e405STejun Heo }
25534f8b02b4STejun Heo #endif /* CONFIG_SMP */
2554a10aa579SChristoph Lameter 
2555a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS
2556a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos)
2557e199b5d1SNamhyung Kim 	__acquires(&vmlist_lock)
2558a10aa579SChristoph Lameter {
2559a10aa579SChristoph Lameter 	loff_t n = *pos;
2560a10aa579SChristoph Lameter 	struct vm_struct *v;
2561a10aa579SChristoph Lameter 
2562a10aa579SChristoph Lameter 	read_lock(&vmlist_lock);
2563a10aa579SChristoph Lameter 	v = vmlist;
2564a10aa579SChristoph Lameter 	while (n > 0 && v) {
2565a10aa579SChristoph Lameter 		n--;
2566a10aa579SChristoph Lameter 		v = v->next;
2567a10aa579SChristoph Lameter 	}
2568a10aa579SChristoph Lameter 	if (!n)
2569a10aa579SChristoph Lameter 		return v;
2570a10aa579SChristoph Lameter 
2571a10aa579SChristoph Lameter 	return NULL;
2572a10aa579SChristoph Lameter 
2573a10aa579SChristoph Lameter }
2574a10aa579SChristoph Lameter 
2575a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2576a10aa579SChristoph Lameter {
2577a10aa579SChristoph Lameter 	struct vm_struct *v = p;
2578a10aa579SChristoph Lameter 
2579a10aa579SChristoph Lameter 	++*pos;
2580a10aa579SChristoph Lameter 	return v->next;
2581a10aa579SChristoph Lameter }
2582a10aa579SChristoph Lameter 
2583a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p)
2584e199b5d1SNamhyung Kim 	__releases(&vmlist_lock)
2585a10aa579SChristoph Lameter {
2586a10aa579SChristoph Lameter 	read_unlock(&vmlist_lock);
2587a10aa579SChristoph Lameter }
2588a10aa579SChristoph Lameter 
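/*
 * Editor's note: the three helpers above implement the standard
 * seq_file contract for /proc/vmallocinfo -- s_start() takes
 * vmlist_lock for reading and walks to the *pos'th vm_struct,
 * s_next() advances one entry and bumps *pos, and s_stop() drops the
 * lock, so every traversal of the list happens under the read lock.
 */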
2589a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2590a47a126aSEric Dumazet {
2591e5adfffcSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_NUMA)) {
2592a47a126aSEric Dumazet 		unsigned int nr, *counters = m->private;
2593a47a126aSEric Dumazet 
2594a47a126aSEric Dumazet 		if (!counters)
2595a47a126aSEric Dumazet 			return;
2596a47a126aSEric Dumazet 
2597a47a126aSEric Dumazet 		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2598a47a126aSEric Dumazet 
2599a47a126aSEric Dumazet 		for (nr = 0; nr < v->nr_pages; nr++)
2600a47a126aSEric Dumazet 			counters[page_to_nid(v->pages[nr])]++;
2601a47a126aSEric Dumazet 
2602a47a126aSEric Dumazet 		for_each_node_state(nr, N_HIGH_MEMORY)
2603a47a126aSEric Dumazet 			if (counters[nr])
2604a47a126aSEric Dumazet 				seq_printf(m, " N%u=%u", nr, counters[nr]);
2605a47a126aSEric Dumazet 	}
2606a47a126aSEric Dumazet }
2607a47a126aSEric Dumazet 
2608a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p)
2609a10aa579SChristoph Lameter {
2610a10aa579SChristoph Lameter 	struct vm_struct *v = p;
2611a10aa579SChristoph Lameter 
261245ec1690SKees Cook 	seq_printf(m, "0x%pK-0x%pK %7ld",
2613a10aa579SChristoph Lameter 		v->addr, v->addr + v->size, v->size);
2614a10aa579SChristoph Lameter 
261562c70bceSJoe Perches 	if (v->caller)
261662c70bceSJoe Perches 		seq_printf(m, " %pS", v->caller);
261723016969SChristoph Lameter 
2618a10aa579SChristoph Lameter 	if (v->nr_pages)
2619a10aa579SChristoph Lameter 		seq_printf(m, " pages=%d", v->nr_pages);
2620a10aa579SChristoph Lameter 
2621a10aa579SChristoph Lameter 	if (v->phys_addr)
2622ffa71f33SKenji Kaneshige 		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);
2623a10aa579SChristoph Lameter 
2624a10aa579SChristoph Lameter 	if (v->flags & VM_IOREMAP)
2625a10aa579SChristoph Lameter 		seq_printf(m, " ioremap");
2626a10aa579SChristoph Lameter 
2627a10aa579SChristoph Lameter 	if (v->flags & VM_ALLOC)
2628a10aa579SChristoph Lameter 		seq_printf(m, " vmalloc");
2629a10aa579SChristoph Lameter 
2630a10aa579SChristoph Lameter 	if (v->flags & VM_MAP)
2631a10aa579SChristoph Lameter 		seq_printf(m, " vmap");
2632a10aa579SChristoph Lameter 
2633a10aa579SChristoph Lameter 	if (v->flags & VM_USERMAP)
2634a10aa579SChristoph Lameter 		seq_printf(m, " user");
2635a10aa579SChristoph Lameter 
2636a10aa579SChristoph Lameter 	if (v->flags & VM_VPAGES)
2637a10aa579SChristoph Lameter 		seq_printf(m, " vpages");
2638a10aa579SChristoph Lameter 
2639a47a126aSEric Dumazet 	show_numa_info(m, v);
2640a10aa579SChristoph Lameter 	seq_putc(m, '\n');
2641a10aa579SChristoph Lameter 	return 0;
2642a10aa579SChristoph Lameter }
2643a10aa579SChristoph Lameter 
26445f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = {
2645a10aa579SChristoph Lameter 	.start = s_start,
2646a10aa579SChristoph Lameter 	.next = s_next,
2647a10aa579SChristoph Lameter 	.stop = s_stop,
2648a10aa579SChristoph Lameter 	.show = s_show,
2649a10aa579SChristoph Lameter };
26505f6a6a9cSAlexey Dobriyan 
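/*
 * Editor's illustration: a single (hypothetical) line emitted by
 * s_show() above, as it would appear in /proc/vmallocinfo:
 *
 *   0xffffc90000000000-0xffffc90000005000   20480 some_caller+0x32/0x90 pages=4 vmalloc N0=4
 *
 * i.e. the address range and byte size, the %pS-formatted caller, the
 * page count, the VM_* flag names, and the per-node page counts.
 */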
26515f6a6a9cSAlexey Dobriyan static int vmalloc_open(struct inode *inode, struct file *file)
26525f6a6a9cSAlexey Dobriyan {
26535f6a6a9cSAlexey Dobriyan 	unsigned int *ptr = NULL;
26545f6a6a9cSAlexey Dobriyan 	int ret;
26555f6a6a9cSAlexey Dobriyan 
2656e5adfffcSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_NUMA)) {
26575f6a6a9cSAlexey Dobriyan 		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
265851980ac9SKulikov Vasiliy 		if (ptr == NULL)
265951980ac9SKulikov Vasiliy 			return -ENOMEM;
266051980ac9SKulikov Vasiliy 	}
26615f6a6a9cSAlexey Dobriyan 	ret = seq_open(file, &vmalloc_op);
26625f6a6a9cSAlexey Dobriyan 	if (!ret) {
26635f6a6a9cSAlexey Dobriyan 		struct seq_file *m = file->private_data;
26645f6a6a9cSAlexey Dobriyan 		m->private = ptr;
26655f6a6a9cSAlexey Dobriyan 	} else
26665f6a6a9cSAlexey Dobriyan 		kfree(ptr);
26675f6a6a9cSAlexey Dobriyan 	return ret;
26685f6a6a9cSAlexey Dobriyan }
26695f6a6a9cSAlexey Dobriyan 
26705f6a6a9cSAlexey Dobriyan static const struct file_operations proc_vmalloc_operations = {
26715f6a6a9cSAlexey Dobriyan 	.open = vmalloc_open,
26725f6a6a9cSAlexey Dobriyan 	.read = seq_read,
26735f6a6a9cSAlexey Dobriyan 	.llseek = seq_lseek,
26745f6a6a9cSAlexey Dobriyan 	.release = seq_release_private,
26755f6a6a9cSAlexey Dobriyan };
26765f6a6a9cSAlexey Dobriyan 
26775f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void)
26785f6a6a9cSAlexey Dobriyan {
26795f6a6a9cSAlexey Dobriyan 	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
26805f6a6a9cSAlexey Dobriyan 	return 0;
26815f6a6a9cSAlexey Dobriyan }
26825f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init);
2683a10aa579SChristoph Lameter #endif
2684a10aa579SChristoph Lameter 
2685
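/*
 * Editor's sketch (userspace, illustrative only): the file registered
 * by proc_vmalloc_init() above is read like any other seq_file; note
 * that S_IRUSR makes it readable by root only.
 */
#if 0	/* build as an ordinary userspace program */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/vmallocinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif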