/*
 * linux/mm/vmalloc.c
 *
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot,
				pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}
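
/*
 * Illustrative sketch, not part of the original file: given the invariant
 * documented above, a caller that has reserved the kernel VA range
 * [addr, addr + n * PAGE_SIZE) can wire an array of pages into it so that
 * addr + N * PAGE_SIZE maps pages[N].
 */
#if 0
static int example_wire_pages(unsigned long addr, struct page **pages,
			      unsigned long n)
{
	int ret;

	ret = vmap_page_range(addr, addr + n * PAGE_SIZE, PAGE_KERNEL, pages);
	return ret < 0 ? ret : 0;	/* ret >= 0 is the number mapped */
}
#endif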

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
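
/*
 * Illustrative sketch, not part of the original file: a driver that
 * vmalloc()ed a buffer and needs the backing struct page (e.g. to build
 * a scatterlist) can walk it page by page with vmalloc_to_page().
 */
#if 0
static struct page *example_nth_page(void *vmalloc_buf, unsigned long n)
{
	return vmalloc_to_page(vmalloc_buf + n * PAGE_SIZE);
}
#endif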

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);
static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	if (addr + size - 1 < addr)
		goto overflow;

	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);
			if (addr + size - 1 < addr)
				goto overflow;

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
overflow:
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		kfree(va);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	call_rcu(&va->rcu_head, rcu_free_va);
}
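
/*
 * Illustrative sketch, not part of the original file: the list_del_rcu()
 * plus call_rcu() pairing above exists because vmap_area_list is walked
 * locklessly; a reader of the shape below may still hold a reference to a
 * vmap_area that a concurrent __free_vmap_area() has already unlinked, so
 * the kfree() must be deferred until a grace period has elapsed.
 */
#if 0
static void example_reader(void)
{
	struct vmap_area *va;

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		/* va cannot be kfree'd while we are in this section */
	}
	rcu_read_unlock();
}
#endif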

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
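
/*
 * Worked example of the scaling above: with 16 online CPUs and 4K pages,
 * fls(16) == 5, so lazy_max_pages() == 5 * (32MB / 4KB) == 40960 pages,
 * i.e. up to 160MB of lazily-freed KVA may accumulate before a purge is
 * triggered.
 */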

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
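
/*
 * Worked example of the sizing above: on 64-bit with 4K pages and
 * NR_CPUS == 64, VMALLOC_PAGES / NR_CPUS / 16 == 32M / 64 / 16 == 32768,
 * which VMAP_MIN() clamps to VMAP_BBMAP_BITS_MAX, so VMAP_BBMAP_BITS is
 * 1024 and each vmap block spans VMAP_BLOCK_SIZE == 4MB of KVA.
 */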

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
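
/*
 * Worked example (illustrative): blocks are VMAP_BLOCK_SIZE-aligned, so
 * every address inside one block yields the same index; with 4MB blocks
 * and a block-aligned VMALLOC_START, VMALLOC_START + 5MB and
 * VMALLOC_START + 7MB both map to index 1. This is what lets vb_free()
 * find the owning vmap_block for any interior address.
 */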

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_thiscpu(void)
{
	purge_fragmented_blocks(smp_processor_id());
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;
	int purge = 0;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;

		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i < 0) {
			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
				/* fragmented and no outstanding allocations */
				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
				purge = 1;
			}
			goto next;
		}
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	if (purge)
		purge_fragmented_blocks_thiscpu();

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
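
/*
 * Illustrative sketch, not part of the original file: vb_alloc() and
 * vb_free() must be paired with the same size, which is why
 * vm_unmap_ram() below requires the original count from vm_map_ram().
 */
#if 0
static void example_vb_pair(void)
{
	unsigned long size = 4 * PAGE_SIZE;	/* <= VMAP_MAX_ALLOC pages */
	void *p = vb_alloc(size, GFP_KERNEL);

	if (!IS_ERR(p))
		vb_free(p, size);		/* same size as allocated */
}
#endif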

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
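
/*
 * Illustrative sketch, not part of the original file: a typical transient
 * use of the API above; node -1 means no NUMA preference.
 */
#if 0
static void example_map_ram(struct page **pages, unsigned int count)
{
	void *mem = vm_map_ram(pages, count, -1, PAGE_KERNEL);

	if (mem) {
		/* touch mem[0 .. count * PAGE_SIZE) here */
		vm_unmap_ram(mem, count);	/* same count as mapped */
	}
}
#endif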

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm->next = vmlist;
	vmlist = vm;
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
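
/*
 * Illustrative sketch, not part of the original file: the _noflush
 * variants above push cache maintenance onto the caller, so a mapping
 * path looks like:
 */
#if 0
static int example_map_noflush(unsigned long addr, unsigned long size,
			       struct page **pages)
{
	flush_cache_vmap(addr, addr + size);	/* caller's responsibility */
	return map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
}
#endif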
11868fc48985STejun Heo */ 1187db64fe02SNick Piggin void unmap_kernel_range(unsigned long addr, unsigned long size) 1188db64fe02SNick Piggin { 1189db64fe02SNick Piggin unsigned long end = addr + size; 1190f6fcba70STejun Heo 1191f6fcba70STejun Heo flush_cache_vunmap(addr, end); 1192db64fe02SNick Piggin vunmap_page_range(addr, end); 1193db64fe02SNick Piggin flush_tlb_kernel_range(addr, end); 1194db64fe02SNick Piggin } 1195db64fe02SNick Piggin 1196db64fe02SNick Piggin int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages) 1197db64fe02SNick Piggin { 1198db64fe02SNick Piggin unsigned long addr = (unsigned long)area->addr; 1199db64fe02SNick Piggin unsigned long end = addr + area->size - PAGE_SIZE; 1200db64fe02SNick Piggin int err; 1201db64fe02SNick Piggin 1202db64fe02SNick Piggin err = vmap_page_range(addr, end, prot, *pages); 1203db64fe02SNick Piggin if (err > 0) { 1204db64fe02SNick Piggin *pages += err; 1205db64fe02SNick Piggin err = 0; 1206db64fe02SNick Piggin } 1207db64fe02SNick Piggin 1208db64fe02SNick Piggin return err; 1209db64fe02SNick Piggin } 1210db64fe02SNick Piggin EXPORT_SYMBOL_GPL(map_vm_area); 1211db64fe02SNick Piggin 1212db64fe02SNick Piggin /*** Old vmalloc interfaces ***/ 1213db64fe02SNick Piggin DEFINE_RWLOCK(vmlist_lock); 1214db64fe02SNick Piggin struct vm_struct *vmlist; 1215db64fe02SNick Piggin 1216cf88c790STejun Heo static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 1217cf88c790STejun Heo unsigned long flags, void *caller) 1218cf88c790STejun Heo { 1219cf88c790STejun Heo struct vm_struct *tmp, **p; 1220cf88c790STejun Heo 1221cf88c790STejun Heo vm->flags = flags; 1222cf88c790STejun Heo vm->addr = (void *)va->va_start; 1223cf88c790STejun Heo vm->size = va->va_end - va->va_start; 1224cf88c790STejun Heo vm->caller = caller; 1225cf88c790STejun Heo va->private = vm; 1226cf88c790STejun Heo va->flags |= VM_VM_AREA; 1227cf88c790STejun Heo 1228cf88c790STejun Heo write_lock(&vmlist_lock); 1229cf88c790STejun Heo for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 1230cf88c790STejun Heo if (tmp->addr >= vm->addr) 1231cf88c790STejun Heo break; 1232cf88c790STejun Heo } 1233cf88c790STejun Heo vm->next = *p; 1234cf88c790STejun Heo *p = vm; 1235cf88c790STejun Heo write_unlock(&vmlist_lock); 1236cf88c790STejun Heo } 1237cf88c790STejun Heo 1238db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size, 12392dca6999SDavid Miller unsigned long align, unsigned long flags, unsigned long start, 12402dca6999SDavid Miller unsigned long end, int node, gfp_t gfp_mask, void *caller) 1241db64fe02SNick Piggin { 1242db64fe02SNick Piggin struct vmap_area *va; 1243db64fe02SNick Piggin struct vm_struct *area; 12441da177e4SLinus Torvalds 124552fd24caSGiridhar Pemmasani BUG_ON(in_interrupt()); 12461da177e4SLinus Torvalds if (flags & VM_IOREMAP) { 12471da177e4SLinus Torvalds int bit = fls(size); 12481da177e4SLinus Torvalds 12491da177e4SLinus Torvalds if (bit > IOREMAP_MAX_ORDER) 12501da177e4SLinus Torvalds bit = IOREMAP_MAX_ORDER; 12511da177e4SLinus Torvalds else if (bit < PAGE_SHIFT) 12521da177e4SLinus Torvalds bit = PAGE_SHIFT; 12531da177e4SLinus Torvalds 12541da177e4SLinus Torvalds align = 1ul << bit; 12551da177e4SLinus Torvalds } 1256db64fe02SNick Piggin 12571da177e4SLinus Torvalds size = PAGE_ALIGN(size); 125831be8309SOGAWA Hirofumi if (unlikely(!size)) 125931be8309SOGAWA Hirofumi return NULL; 12601da177e4SLinus Torvalds 1261cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
12621da177e4SLinus Torvalds if (unlikely(!area)) 12631da177e4SLinus Torvalds return NULL; 12641da177e4SLinus Torvalds 12651da177e4SLinus Torvalds /* 12661da177e4SLinus Torvalds * We always allocate a guard page. 12671da177e4SLinus Torvalds */ 12681da177e4SLinus Torvalds size += PAGE_SIZE; 12691da177e4SLinus Torvalds 1270db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask); 1271db64fe02SNick Piggin if (IS_ERR(va)) { 1272db64fe02SNick Piggin kfree(area); 1273db64fe02SNick Piggin return NULL; 12741da177e4SLinus Torvalds } 12751da177e4SLinus Torvalds 1276cf88c790STejun Heo insert_vmalloc_vm(area, va, flags, caller); 12771da177e4SLinus Torvalds return area; 12781da177e4SLinus Torvalds } 12791da177e4SLinus Torvalds 1280930fc45aSChristoph Lameter struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 1281930fc45aSChristoph Lameter unsigned long start, unsigned long end) 1282930fc45aSChristoph Lameter { 12832dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, 128423016969SChristoph Lameter __builtin_return_address(0)); 1285930fc45aSChristoph Lameter } 12865992b6daSRusty Russell EXPORT_SYMBOL_GPL(__get_vm_area); 1287930fc45aSChristoph Lameter 1288c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 1289c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end, 1290c2968612SBenjamin Herrenschmidt void *caller) 1291c2968612SBenjamin Herrenschmidt { 12922dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, 1293c2968612SBenjamin Herrenschmidt caller); 1294c2968612SBenjamin Herrenschmidt } 1295c2968612SBenjamin Herrenschmidt 12961da177e4SLinus Torvalds /** 1297183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area 12981da177e4SLinus Torvalds * @size: size of the area 12991da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 13001da177e4SLinus Torvalds * 13011da177e4SLinus Torvalds * Search for an area of @size in the kernel virtual mapping area, 13021da177e4SLinus Torvalds * and reserve it for our purposes. Returns the area descriptor 13031da177e4SLinus Torvalds * on success or %NULL on failure.
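 *
 * A minimal sketch (illustrative, not part of the original source):
 *
 *	struct vm_struct *vm = get_vm_area(PAGE_SIZE, VM_IOREMAP);
 *
 *	if (!vm)
 *		return -ENOMEM;
 *
 * On success vm->addr points at a reserved but still unmapped range of
 * one page plus the implicit guard page that __get_vm_area_node() adds.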
13041da177e4SLinus Torvalds */ 13051da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 13061da177e4SLinus Torvalds { 13072dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, 130823016969SChristoph Lameter -1, GFP_KERNEL, __builtin_return_address(0)); 130923016969SChristoph Lameter } 131023016969SChristoph Lameter 131123016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 131223016969SChristoph Lameter void *caller) 131323016969SChristoph Lameter { 13142dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, 131523016969SChristoph Lameter -1, GFP_KERNEL, caller); 13161da177e4SLinus Torvalds } 13171da177e4SLinus Torvalds 1318db64fe02SNick Piggin static struct vm_struct *find_vm_area(const void *addr) 131983342314SNick Piggin { 1320db64fe02SNick Piggin struct vmap_area *va; 132183342314SNick Piggin 1322db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 1323db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) 1324db64fe02SNick Piggin return va->private; 132583342314SNick Piggin 13267856dfebSAndi Kleen return NULL; 13277856dfebSAndi Kleen } 13287856dfebSAndi Kleen 13291da177e4SLinus Torvalds /** 1330183ff22bSSimon Arlott * remove_vm_area - find and remove a contiguous kernel virtual area 13311da177e4SLinus Torvalds * @addr: base address 13321da177e4SLinus Torvalds * 13331da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it. 13341da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe 13357856dfebSAndi Kleen * on SMP machines, except for its size or flags. 13361da177e4SLinus Torvalds */ 1337b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 13381da177e4SLinus Torvalds { 1339db64fe02SNick Piggin struct vmap_area *va; 1340db64fe02SNick Piggin 1341db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 1342db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) { 1343db64fe02SNick Piggin struct vm_struct *vm = va->private; 1344db64fe02SNick Piggin struct vm_struct *tmp, **p; 1345dd32c279SKAMEZAWA Hiroyuki /* 1346dd32c279SKAMEZAWA Hiroyuki * remove from list and disallow access to this vm_struct 1347dd32c279SKAMEZAWA Hiroyuki * before unmap. (address range conflicts are handled by 1348dd32c279SKAMEZAWA Hiroyuki * the vmap layer.)
1349dd32c279SKAMEZAWA Hiroyuki */ 13501da177e4SLinus Torvalds write_lock(&vmlist_lock); 1351db64fe02SNick Piggin for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next) 1352db64fe02SNick Piggin ; 1353db64fe02SNick Piggin *p = tmp->next; 13541da177e4SLinus Torvalds write_unlock(&vmlist_lock); 1355db64fe02SNick Piggin 1356dd32c279SKAMEZAWA Hiroyuki vmap_debug_free_range(va->va_start, va->va_end); 1357dd32c279SKAMEZAWA Hiroyuki free_unmap_vmap_area(va); 1358dd32c279SKAMEZAWA Hiroyuki vm->size -= PAGE_SIZE; 1359dd32c279SKAMEZAWA Hiroyuki 1360db64fe02SNick Piggin return vm; 1361db64fe02SNick Piggin } 1362db64fe02SNick Piggin return NULL; 13631da177e4SLinus Torvalds } 13641da177e4SLinus Torvalds 1365b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 13661da177e4SLinus Torvalds { 13671da177e4SLinus Torvalds struct vm_struct *area; 13681da177e4SLinus Torvalds 13691da177e4SLinus Torvalds if (!addr) 13701da177e4SLinus Torvalds return; 13711da177e4SLinus Torvalds 13721da177e4SLinus Torvalds if ((PAGE_SIZE-1) & (unsigned long)addr) { 13734c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr); 13741da177e4SLinus Torvalds return; 13751da177e4SLinus Torvalds } 13761da177e4SLinus Torvalds 13771da177e4SLinus Torvalds area = remove_vm_area(addr); 13781da177e4SLinus Torvalds if (unlikely(!area)) { 13794c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 13801da177e4SLinus Torvalds addr); 13811da177e4SLinus Torvalds return; 13821da177e4SLinus Torvalds } 13831da177e4SLinus Torvalds 13849a11b49aSIngo Molnar debug_check_no_locks_freed(addr, area->size); 13853ac7fe5aSThomas Gleixner debug_check_no_obj_freed(addr, area->size); 13869a11b49aSIngo Molnar 13871da177e4SLinus Torvalds if (deallocate_pages) { 13881da177e4SLinus Torvalds int i; 13891da177e4SLinus Torvalds 13901da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1391bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 1392bf53d6f8SChristoph Lameter 1393bf53d6f8SChristoph Lameter BUG_ON(!page); 1394bf53d6f8SChristoph Lameter __free_page(page); 13951da177e4SLinus Torvalds } 13961da177e4SLinus Torvalds 13978757d5faSJan Kiszka if (area->flags & VM_VPAGES) 13981da177e4SLinus Torvalds vfree(area->pages); 13991da177e4SLinus Torvalds else 14001da177e4SLinus Torvalds kfree(area->pages); 14011da177e4SLinus Torvalds } 14021da177e4SLinus Torvalds 14031da177e4SLinus Torvalds kfree(area); 14041da177e4SLinus Torvalds return; 14051da177e4SLinus Torvalds } 14061da177e4SLinus Torvalds 14071da177e4SLinus Torvalds /** 14081da177e4SLinus Torvalds * vfree - release memory allocated by vmalloc() 14091da177e4SLinus Torvalds * @addr: memory base address 14101da177e4SLinus Torvalds * 1411183ff22bSSimon Arlott * Free the virtually contiguous memory area starting at @addr, as 141280e93effSPekka Enberg * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is 141380e93effSPekka Enberg * NULL, no operation is performed. 14141da177e4SLinus Torvalds * 141580e93effSPekka Enberg * Must not be called in interrupt context.
14161da177e4SLinus Torvalds */ 1417b3bdda02SChristoph Lameter void vfree(const void *addr) 14181da177e4SLinus Torvalds { 14191da177e4SLinus Torvalds BUG_ON(in_interrupt()); 142089219d37SCatalin Marinas 142189219d37SCatalin Marinas kmemleak_free(addr); 142289219d37SCatalin Marinas 14231da177e4SLinus Torvalds __vunmap(addr, 1); 14241da177e4SLinus Torvalds } 14251da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 14261da177e4SLinus Torvalds 14271da177e4SLinus Torvalds /** 14281da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 14291da177e4SLinus Torvalds * @addr: memory base address 14301da177e4SLinus Torvalds * 14311da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 14321da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 14331da177e4SLinus Torvalds * 143480e93effSPekka Enberg * Must not be called in interrupt context. 14351da177e4SLinus Torvalds */ 1436b3bdda02SChristoph Lameter void vunmap(const void *addr) 14371da177e4SLinus Torvalds { 14381da177e4SLinus Torvalds BUG_ON(in_interrupt()); 143934754b69SPeter Zijlstra might_sleep(); 14401da177e4SLinus Torvalds __vunmap(addr, 0); 14411da177e4SLinus Torvalds } 14421da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 14431da177e4SLinus Torvalds 14441da177e4SLinus Torvalds /** 14451da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 14461da177e4SLinus Torvalds * @pages: array of page pointers 14471da177e4SLinus Torvalds * @count: number of pages to map 14481da177e4SLinus Torvalds * @flags: vm_area->flags 14491da177e4SLinus Torvalds * @prot: page protection for the mapping 14501da177e4SLinus Torvalds * 14511da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 14521da177e4SLinus Torvalds * space. 
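 *
 * A minimal sketch (illustrative, not part of the original source); note
 * that vunmap() only drops the mapping, the pages remain owned by the
 * caller:
 *
 *	struct page *pages[2] = { alloc_page(GFP_KERNEL),
 *				  alloc_page(GFP_KERNEL) };
 *	void *va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		memset(va, 0, 2 * PAGE_SIZE);
 *		vunmap(va);
 *	}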
14531da177e4SLinus Torvalds */ 14541da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 14551da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 14561da177e4SLinus Torvalds { 14571da177e4SLinus Torvalds struct vm_struct *area; 14581da177e4SLinus Torvalds 145934754b69SPeter Zijlstra might_sleep(); 146034754b69SPeter Zijlstra 14614481374cSJan Beulich if (count > totalram_pages) 14621da177e4SLinus Torvalds return NULL; 14631da177e4SLinus Torvalds 146423016969SChristoph Lameter area = get_vm_area_caller((count << PAGE_SHIFT), flags, 146523016969SChristoph Lameter __builtin_return_address(0)); 14661da177e4SLinus Torvalds if (!area) 14671da177e4SLinus Torvalds return NULL; 146823016969SChristoph Lameter 14691da177e4SLinus Torvalds if (map_vm_area(area, prot, &pages)) { 14701da177e4SLinus Torvalds vunmap(area->addr); 14711da177e4SLinus Torvalds return NULL; 14721da177e4SLinus Torvalds } 14731da177e4SLinus Torvalds 14741da177e4SLinus Torvalds return area->addr; 14751da177e4SLinus Torvalds } 14761da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 14771da177e4SLinus Torvalds 14782dca6999SDavid Miller static void *__vmalloc_node(unsigned long size, unsigned long align, 14792dca6999SDavid Miller gfp_t gfp_mask, pgprot_t prot, 1480db64fe02SNick Piggin int node, void *caller); 1481e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 148223016969SChristoph Lameter pgprot_t prot, int node, void *caller) 14831da177e4SLinus Torvalds { 14841da177e4SLinus Torvalds struct page **pages; 14851da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 1486976d6dfbSJan Beulich gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 14871da177e4SLinus Torvalds 14881da177e4SLinus Torvalds nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT; 14891da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 14901da177e4SLinus Torvalds 14911da177e4SLinus Torvalds area->nr_pages = nr_pages; 14921da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
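 * The nested __vmalloc_node() call only allocates the page array,
 * which is smaller than the original request by a factor of roughly
 * PAGE_SIZE / sizeof(struct page *), so the nesting terminates after
 * a level or two.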
*/ 14938757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 1494976d6dfbSJan Beulich pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM, 149523016969SChristoph Lameter PAGE_KERNEL, node, caller); 14968757d5faSJan Kiszka area->flags |= VM_VPAGES; 1497286e1ea3SAndrew Morton } else { 1498976d6dfbSJan Beulich pages = kmalloc_node(array_size, nested_gfp, node); 1499286e1ea3SAndrew Morton } 15001da177e4SLinus Torvalds area->pages = pages; 150123016969SChristoph Lameter area->caller = caller; 15021da177e4SLinus Torvalds if (!area->pages) { 15031da177e4SLinus Torvalds remove_vm_area(area->addr); 15041da177e4SLinus Torvalds kfree(area); 15051da177e4SLinus Torvalds return NULL; 15061da177e4SLinus Torvalds } 15071da177e4SLinus Torvalds 15081da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1509bf53d6f8SChristoph Lameter struct page *page; 1510bf53d6f8SChristoph Lameter 1511930fc45aSChristoph Lameter if (node < 0) 1512bf53d6f8SChristoph Lameter page = alloc_page(gfp_mask); 1513930fc45aSChristoph Lameter else 1514bf53d6f8SChristoph Lameter page = alloc_pages_node(node, gfp_mask, 0); 1515bf53d6f8SChristoph Lameter 1516bf53d6f8SChristoph Lameter if (unlikely(!page)) { 15171da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 15181da177e4SLinus Torvalds area->nr_pages = i; 15191da177e4SLinus Torvalds goto fail; 15201da177e4SLinus Torvalds } 1521bf53d6f8SChristoph Lameter area->pages[i] = page; 15221da177e4SLinus Torvalds } 15231da177e4SLinus Torvalds 15241da177e4SLinus Torvalds if (map_vm_area(area, prot, &pages)) 15251da177e4SLinus Torvalds goto fail; 15261da177e4SLinus Torvalds return area->addr; 15271da177e4SLinus Torvalds 15281da177e4SLinus Torvalds fail: 15291da177e4SLinus Torvalds vfree(area->addr); 15301da177e4SLinus Torvalds return NULL; 15311da177e4SLinus Torvalds } 15321da177e4SLinus Torvalds 1533*d0a21265SDavid Rientjes /** 1534*d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 1535*d0a21265SDavid Rientjes * @size: allocation size 1536*d0a21265SDavid Rientjes * @align: desired alignment 1537*d0a21265SDavid Rientjes * @start: vm area range start 1538*d0a21265SDavid Rientjes * @end: vm area range end 1539*d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 1540*d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 1541*d0a21265SDavid Rientjes * @node: node to use for allocation or -1 1542*d0a21265SDavid Rientjes * @caller: caller's return address 1543*d0a21265SDavid Rientjes * 1544*d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 1545*d0a21265SDavid Rientjes * allocator with @gfp_mask flags. Map them into contiguous 1546*d0a21265SDavid Rientjes * kernel virtual space, using a pagetable protection of @prot. 
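 *
 * This is the common backend: as the code below shows, a plain
 * vmalloc(@size) call for instance boils down to
 *
 *	__vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 *			GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 *			-1, __builtin_return_address(0));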
1547*d0a21265SDavid Rientjes */ 1548*d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 1549*d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 1550*d0a21265SDavid Rientjes pgprot_t prot, int node, void *caller) 1551930fc45aSChristoph Lameter { 1552*d0a21265SDavid Rientjes struct vm_struct *area; 1553*d0a21265SDavid Rientjes void *addr; 1554*d0a21265SDavid Rientjes unsigned long real_size = size; 1555*d0a21265SDavid Rientjes 1556*d0a21265SDavid Rientjes size = PAGE_ALIGN(size); 1557*d0a21265SDavid Rientjes if (!size || (size >> PAGE_SHIFT) > totalram_pages) 1558*d0a21265SDavid Rientjes return NULL; 1559*d0a21265SDavid Rientjes 1560*d0a21265SDavid Rientjes area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node, 1561*d0a21265SDavid Rientjes gfp_mask, caller); 1562*d0a21265SDavid Rientjes 1563*d0a21265SDavid Rientjes if (!area) 1564*d0a21265SDavid Rientjes return NULL; 1565*d0a21265SDavid Rientjes 1566*d0a21265SDavid Rientjes addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller); 156789219d37SCatalin Marinas 156889219d37SCatalin Marinas /* 156989219d37SCatalin Marinas * A ref_count = 3 is needed because the vm_struct and vmap_area 157089219d37SCatalin Marinas * structures allocated in the __get_vm_area_node() function contain 157189219d37SCatalin Marinas * references to the virtual address of the vmalloc'ed block. 157289219d37SCatalin Marinas */ 1573*d0a21265SDavid Rientjes kmemleak_alloc(addr, real_size, 3, gfp_mask); 157489219d37SCatalin Marinas 157589219d37SCatalin Marinas return addr; 1576930fc45aSChristoph Lameter } 1577930fc45aSChristoph Lameter 15781da177e4SLinus Torvalds /** 1579930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 15801da177e4SLinus Torvalds * @size: allocation size 15812dca6999SDavid Miller * @align: desired alignment 15821da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 15831da177e4SLinus Torvalds * @prot: protection mask for the allocated pages 1584d44e0780SRandy Dunlap * @node: node to use for allocation or -1 1585c85d194bSRandy Dunlap * @caller: caller's return address 15861da177e4SLinus Torvalds * 15871da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 15881da177e4SLinus Torvalds * allocator with @gfp_mask flags. Map them into contiguous 15891da177e4SLinus Torvalds * kernel virtual space, using a pagetable protection of @prot. 
15901da177e4SLinus Torvalds */ 15912dca6999SDavid Miller static void *__vmalloc_node(unsigned long size, unsigned long align, 15922dca6999SDavid Miller gfp_t gfp_mask, pgprot_t prot, 159323016969SChristoph Lameter int node, void *caller) 15941da177e4SLinus Torvalds { 1595*d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 1596*d0a21265SDavid Rientjes gfp_mask, prot, node, caller); 15971da177e4SLinus Torvalds } 15981da177e4SLinus Torvalds 1599930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 1600930fc45aSChristoph Lameter { 16012dca6999SDavid Miller return __vmalloc_node(size, 1, gfp_mask, prot, -1, 160223016969SChristoph Lameter __builtin_return_address(0)); 1603930fc45aSChristoph Lameter } 16041da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 16051da177e4SLinus Torvalds 1606e1ca7788SDave Young static inline void *__vmalloc_node_flags(unsigned long size, 1607e1ca7788SDave Young int node, gfp_t flags) 1608e1ca7788SDave Young { 1609e1ca7788SDave Young return __vmalloc_node(size, 1, flags, PAGE_KERNEL, 1610e1ca7788SDave Young node, __builtin_return_address(0)); 1611e1ca7788SDave Young } 1612e1ca7788SDave Young 16131da177e4SLinus Torvalds /** 16141da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 16151da177e4SLinus Torvalds * @size: allocation size 16161da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 16171da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 16181da177e4SLinus Torvalds * 1619c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 16201da177e4SLinus Torvalds * use __vmalloc() instead. 16211da177e4SLinus Torvalds */ 16221da177e4SLinus Torvalds void *vmalloc(unsigned long size) 16231da177e4SLinus Torvalds { 1624e1ca7788SDave Young return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM); 16251da177e4SLinus Torvalds } 16261da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 16271da177e4SLinus Torvalds 1628930fc45aSChristoph Lameter /** 1629e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 1630e1ca7788SDave Young * @size: allocation size 1631e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 1632e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 1633e1ca7788SDave Young * The memory allocated is set to zero. 1634e1ca7788SDave Young * 1635e1ca7788SDave Young * For tight control over page level allocator and protection flags 1636e1ca7788SDave Young * use __vmalloc() instead. 1637e1ca7788SDave Young */ 1638e1ca7788SDave Young void *vzalloc(unsigned long size) 1639e1ca7788SDave Young { 1640e1ca7788SDave Young return __vmalloc_node_flags(size, -1, 1641e1ca7788SDave Young GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 1642e1ca7788SDave Young } 1643e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 1644e1ca7788SDave Young 1645e1ca7788SDave Young /** 1646ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 164783342314SNick Piggin * @size: allocation size 1648ead04089SRolf Eike Beer * 1649ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 1650ead04089SRolf Eike Beer * without leaking data. 
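 *
 * An illustrative sketch (not part of the original source; @len is
 * assumed to be supplied by the caller):
 *
 *	void *buf = vmalloc_user(len);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *
 * Because VM_USERMAP is set on the underlying area, @buf can later be
 * handed to userspace from an mmap handler via
 * remap_vmalloc_range(vma, buf, 0).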
165183342314SNick Piggin */ 165283342314SNick Piggin void *vmalloc_user(unsigned long size) 165383342314SNick Piggin { 165483342314SNick Piggin struct vm_struct *area; 165583342314SNick Piggin void *ret; 165683342314SNick Piggin 16572dca6999SDavid Miller ret = __vmalloc_node(size, SHMLBA, 16582dca6999SDavid Miller GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 165984877848SGlauber Costa PAGE_KERNEL, -1, __builtin_return_address(0)); 16602b4ac44eSEric Dumazet if (ret) { 1661db64fe02SNick Piggin area = find_vm_area(ret); 166283342314SNick Piggin area->flags |= VM_USERMAP; 16632b4ac44eSEric Dumazet } 166483342314SNick Piggin return ret; 166583342314SNick Piggin } 166683342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 166783342314SNick Piggin 166883342314SNick Piggin /** 1669930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 1670930fc45aSChristoph Lameter * @size: allocation size 1671d44e0780SRandy Dunlap * @node: numa node 1672930fc45aSChristoph Lameter * 1673930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 1674930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 1675930fc45aSChristoph Lameter * 1676c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 1677930fc45aSChristoph Lameter * use __vmalloc() instead. 1678930fc45aSChristoph Lameter */ 1679930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 1680930fc45aSChristoph Lameter { 16812dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 168223016969SChristoph Lameter node, __builtin_return_address(0)); 1683930fc45aSChristoph Lameter } 1684930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 1685930fc45aSChristoph Lameter 1686e1ca7788SDave Young /** 1687e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 1688e1ca7788SDave Young * @size: allocation size 1689e1ca7788SDave Young * @node: numa node 1690e1ca7788SDave Young * 1691e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 1692e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 1693e1ca7788SDave Young * The memory allocated is set to zero. 1694e1ca7788SDave Young * 1695e1ca7788SDave Young * For tight control over page level allocator and protection flags 1696e1ca7788SDave Young * use __vmalloc_node() instead. 1697e1ca7788SDave Young */ 1698e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 1699e1ca7788SDave Young { 1700e1ca7788SDave Young return __vmalloc_node_flags(size, node, 1701e1ca7788SDave Young GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); 1702e1ca7788SDave Young } 1703e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 1704e1ca7788SDave Young 17054dc3b16bSPavel Pisa #ifndef PAGE_KERNEL_EXEC 17064dc3b16bSPavel Pisa # define PAGE_KERNEL_EXEC PAGE_KERNEL 17074dc3b16bSPavel Pisa #endif 17084dc3b16bSPavel Pisa 17091da177e4SLinus Torvalds /** 17101da177e4SLinus Torvalds * vmalloc_exec - allocate virtually contiguous, executable memory 17111da177e4SLinus Torvalds * @size: allocation size 17121da177e4SLinus Torvalds * 17131da177e4SLinus Torvalds * Kernel-internal function to allocate enough pages to cover @size from 17141da177e4SLinus Torvalds * the page level allocator and map them into contiguous and 17151da177e4SLinus Torvalds * executable kernel virtual space.
17161da177e4SLinus Torvalds * 1717c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 17181da177e4SLinus Torvalds * use __vmalloc() instead. 17191da177e4SLinus Torvalds */ 17201da177e4SLinus Torvalds 17211da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size) 17221da177e4SLinus Torvalds { 17232dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, 172484877848SGlauber Costa -1, __builtin_return_address(0)); 17251da177e4SLinus Torvalds } 17261da177e4SLinus Torvalds 17270d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 17287ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL 17290d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 17307ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL 17310d08e0d3SAndi Kleen #else 17320d08e0d3SAndi Kleen #define GFP_VMALLOC32 GFP_KERNEL 17330d08e0d3SAndi Kleen #endif 17340d08e0d3SAndi Kleen 17351da177e4SLinus Torvalds /** 17361da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 17371da177e4SLinus Torvalds * @size: allocation size 17381da177e4SLinus Torvalds * 17391da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 17401da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 17411da177e4SLinus Torvalds */ 17421da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 17431da177e4SLinus Torvalds { 17442dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, 174584877848SGlauber Costa -1, __builtin_return_address(0)); 17461da177e4SLinus Torvalds } 17471da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 17481da177e4SLinus Torvalds 174983342314SNick Piggin /** 1750ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 175183342314SNick Piggin * @size: allocation size 1752ead04089SRolf Eike Beer * 1753ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 1754ead04089SRolf Eike Beer * mapped to userspace without leaking data. 175583342314SNick Piggin */ 175683342314SNick Piggin void *vmalloc_32_user(unsigned long size) 175783342314SNick Piggin { 175883342314SNick Piggin struct vm_struct *area; 175983342314SNick Piggin void *ret; 176083342314SNick Piggin 17612dca6999SDavid Miller ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 176284877848SGlauber Costa -1, __builtin_return_address(0)); 17632b4ac44eSEric Dumazet if (ret) { 1764db64fe02SNick Piggin area = find_vm_area(ret); 176583342314SNick Piggin area->flags |= VM_USERMAP; 17662b4ac44eSEric Dumazet } 176783342314SNick Piggin return ret; 176883342314SNick Piggin } 176983342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 177083342314SNick Piggin 1771d0107eb0SKAMEZAWA Hiroyuki /* 1772d0107eb0SKAMEZAWA Hiroyuki * Small helper routine to copy contents from addr into buf. 1773d0107eb0SKAMEZAWA Hiroyuki * If the page is not present, fill with zeroes.
1774d0107eb0SKAMEZAWA Hiroyuki */ 1775d0107eb0SKAMEZAWA Hiroyuki 1776d0107eb0SKAMEZAWA Hiroyuki static int aligned_vread(char *buf, char *addr, unsigned long count) 1777d0107eb0SKAMEZAWA Hiroyuki { 1778d0107eb0SKAMEZAWA Hiroyuki struct page *p; 1779d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 1780d0107eb0SKAMEZAWA Hiroyuki 1781d0107eb0SKAMEZAWA Hiroyuki while (count) { 1782d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 1783d0107eb0SKAMEZAWA Hiroyuki 1784d0107eb0SKAMEZAWA Hiroyuki offset = (unsigned long)addr & ~PAGE_MASK; 1785d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 1786d0107eb0SKAMEZAWA Hiroyuki if (length > count) 1787d0107eb0SKAMEZAWA Hiroyuki length = count; 1788d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 1789d0107eb0SKAMEZAWA Hiroyuki /* 1790d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need a 1791d0107eb0SKAMEZAWA Hiroyuki * lock. But adding a lock here means that we need to add 1792d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calls for this _debug_ 1793d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 1794d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function. 1795d0107eb0SKAMEZAWA Hiroyuki */ 1796d0107eb0SKAMEZAWA Hiroyuki if (p) { 1797d0107eb0SKAMEZAWA Hiroyuki /* 1798d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's 1799d0107eb0SKAMEZAWA Hiroyuki * function description) 1800d0107eb0SKAMEZAWA Hiroyuki */ 1801d0107eb0SKAMEZAWA Hiroyuki void *map = kmap_atomic(p, KM_USER0); 1802d0107eb0SKAMEZAWA Hiroyuki memcpy(buf, map + offset, length); 1803d0107eb0SKAMEZAWA Hiroyuki kunmap_atomic(map, KM_USER0); 1804d0107eb0SKAMEZAWA Hiroyuki } else 1805d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, length); 1806d0107eb0SKAMEZAWA Hiroyuki 1807d0107eb0SKAMEZAWA Hiroyuki addr += length; 1808d0107eb0SKAMEZAWA Hiroyuki buf += length; 1809d0107eb0SKAMEZAWA Hiroyuki copied += length; 1810d0107eb0SKAMEZAWA Hiroyuki count -= length; 1811d0107eb0SKAMEZAWA Hiroyuki } 1812d0107eb0SKAMEZAWA Hiroyuki return copied; 1813d0107eb0SKAMEZAWA Hiroyuki } 1814d0107eb0SKAMEZAWA Hiroyuki 1815d0107eb0SKAMEZAWA Hiroyuki static int aligned_vwrite(char *buf, char *addr, unsigned long count) 1816d0107eb0SKAMEZAWA Hiroyuki { 1817d0107eb0SKAMEZAWA Hiroyuki struct page *p; 1818d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 1819d0107eb0SKAMEZAWA Hiroyuki 1820d0107eb0SKAMEZAWA Hiroyuki while (count) { 1821d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 1822d0107eb0SKAMEZAWA Hiroyuki 1823d0107eb0SKAMEZAWA Hiroyuki offset = (unsigned long)addr & ~PAGE_MASK; 1824d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 1825d0107eb0SKAMEZAWA Hiroyuki if (length > count) 1826d0107eb0SKAMEZAWA Hiroyuki length = count; 1827d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 1828d0107eb0SKAMEZAWA Hiroyuki /* 1829d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need a 1830d0107eb0SKAMEZAWA Hiroyuki * lock. But adding a lock here means that we need to add 1831d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calls for this _debug_ 1832d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 1833d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function.
1834d0107eb0SKAMEZAWA Hiroyuki */ 1835d0107eb0SKAMEZAWA Hiroyuki if (p) { 1836d0107eb0SKAMEZAWA Hiroyuki /* 1837d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's 1838d0107eb0SKAMEZAWA Hiroyuki * function description) 1839d0107eb0SKAMEZAWA Hiroyuki */ 1840d0107eb0SKAMEZAWA Hiroyuki void *map = kmap_atomic(p, KM_USER0); 1841d0107eb0SKAMEZAWA Hiroyuki memcpy(map + offset, buf, length); 1842d0107eb0SKAMEZAWA Hiroyuki kunmap_atomic(map, KM_USER0); 1843d0107eb0SKAMEZAWA Hiroyuki } 1844d0107eb0SKAMEZAWA Hiroyuki addr += length; 1845d0107eb0SKAMEZAWA Hiroyuki buf += length; 1846d0107eb0SKAMEZAWA Hiroyuki copied += length; 1847d0107eb0SKAMEZAWA Hiroyuki count -= length; 1848d0107eb0SKAMEZAWA Hiroyuki } 1849d0107eb0SKAMEZAWA Hiroyuki return copied; 1850d0107eb0SKAMEZAWA Hiroyuki } 1851d0107eb0SKAMEZAWA Hiroyuki 1852d0107eb0SKAMEZAWA Hiroyuki /** 1853d0107eb0SKAMEZAWA Hiroyuki * vread() - read vmalloc area in a safe way. 1854d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for reading data 1855d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 1856d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read. 1857d0107eb0SKAMEZAWA Hiroyuki * 1858d0107eb0SKAMEZAWA Hiroyuki * Returns the number of bytes by which addr and buf should be 1859d0107eb0SKAMEZAWA Hiroyuki * increased (same as @count). Returns 0 if [addr...addr+count) doesn't 1860d0107eb0SKAMEZAWA Hiroyuki * include any intersection with a live vmalloc area. 1861d0107eb0SKAMEZAWA Hiroyuki * 1862d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 1863d0107eb0SKAMEZAWA Hiroyuki * copies data from that area to a given buffer. If the given memory range 1864d0107eb0SKAMEZAWA Hiroyuki * of [addr...addr+count) includes some valid address, data is copied to 1865d0107eb0SKAMEZAWA Hiroyuki * the proper area of @buf. If there are memory holes, they'll be zero-filled. 1866d0107eb0SKAMEZAWA Hiroyuki * An IOREMAP area is treated as a memory hole and no copy is done. 1867d0107eb0SKAMEZAWA Hiroyuki * 1868d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a live 1869d0107eb0SKAMEZAWA Hiroyuki * vm_struct area, this returns 0. 1870d0107eb0SKAMEZAWA Hiroyuki * @buf should be a kernel buffer. Because this function uses KM_USER0, 1871d0107eb0SKAMEZAWA Hiroyuki * the caller should guarantee that KM_USER0 is not used. 1872d0107eb0SKAMEZAWA Hiroyuki * 1873d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vread() is never necessary because the caller 1874d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy(). 1875d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access the vmalloc area without 1876d0107eb0SKAMEZAWA Hiroyuki * any prior information, such as /dev/kmem.
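 *
 * An illustrative sketch (not part of the original source) of a
 * /dev/kmem style reader, with kbuf a kernel buffer of count bytes:
 *
 *	char *kbuf = kmalloc(count, GFP_KERNEL);
 *	long n = vread(kbuf, (char *)addr, count);
 *
 * When n is non-zero, all count bytes of kbuf are valid, with any
 * unmapped holes zero-filled.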
1877d0107eb0SKAMEZAWA Hiroyuki * 1878d0107eb0SKAMEZAWA Hiroyuki */ 1879d0107eb0SKAMEZAWA Hiroyuki 18801da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count) 18811da177e4SLinus Torvalds { 18821da177e4SLinus Torvalds struct vm_struct *tmp; 18831da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 1884d0107eb0SKAMEZAWA Hiroyuki unsigned long buflen = count; 18851da177e4SLinus Torvalds unsigned long n; 18861da177e4SLinus Torvalds 18871da177e4SLinus Torvalds /* Don't allow overflow */ 18881da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 18891da177e4SLinus Torvalds count = -(unsigned long) addr; 18901da177e4SLinus Torvalds 18911da177e4SLinus Torvalds read_lock(&vmlist_lock); 1892d0107eb0SKAMEZAWA Hiroyuki for (tmp = vmlist; count && tmp; tmp = tmp->next) { 18931da177e4SLinus Torvalds vaddr = (char *) tmp->addr; 18941da177e4SLinus Torvalds if (addr >= vaddr + tmp->size - PAGE_SIZE) 18951da177e4SLinus Torvalds continue; 18961da177e4SLinus Torvalds while (addr < vaddr) { 18971da177e4SLinus Torvalds if (count == 0) 18981da177e4SLinus Torvalds goto finished; 18991da177e4SLinus Torvalds *buf = '\0'; 19001da177e4SLinus Torvalds buf++; 19011da177e4SLinus Torvalds addr++; 19021da177e4SLinus Torvalds count--; 19031da177e4SLinus Torvalds } 19041da177e4SLinus Torvalds n = vaddr + tmp->size - PAGE_SIZE - addr; 1905d0107eb0SKAMEZAWA Hiroyuki if (n > count) 1906d0107eb0SKAMEZAWA Hiroyuki n = count; 1907d0107eb0SKAMEZAWA Hiroyuki if (!(tmp->flags & VM_IOREMAP)) 1908d0107eb0SKAMEZAWA Hiroyuki aligned_vread(buf, addr, n); 1909d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */ 1910d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, n); 1911d0107eb0SKAMEZAWA Hiroyuki buf += n; 1912d0107eb0SKAMEZAWA Hiroyuki addr += n; 1913d0107eb0SKAMEZAWA Hiroyuki count -= n; 19141da177e4SLinus Torvalds } 19151da177e4SLinus Torvalds finished: 19161da177e4SLinus Torvalds read_unlock(&vmlist_lock); 1917d0107eb0SKAMEZAWA Hiroyuki 1918d0107eb0SKAMEZAWA Hiroyuki if (buf == buf_start) 1919d0107eb0SKAMEZAWA Hiroyuki return 0; 1920d0107eb0SKAMEZAWA Hiroyuki /* zero-fill memory holes */ 1921d0107eb0SKAMEZAWA Hiroyuki if (buf != buf_start + buflen) 1922d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, buflen - (buf - buf_start)); 1923d0107eb0SKAMEZAWA Hiroyuki 1924d0107eb0SKAMEZAWA Hiroyuki return buflen; 19251da177e4SLinus Torvalds } 19261da177e4SLinus Torvalds 1927d0107eb0SKAMEZAWA Hiroyuki /** 1928d0107eb0SKAMEZAWA Hiroyuki * vwrite() - write vmalloc area in a safe way. 1929d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for source data 1930d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 1931d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be written. 1932d0107eb0SKAMEZAWA Hiroyuki * 1933d0107eb0SKAMEZAWA Hiroyuki * Returns the number of bytes by which addr and buf should be 1934d0107eb0SKAMEZAWA Hiroyuki * increased (same as @count). 1935d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a valid 1936d0107eb0SKAMEZAWA Hiroyuki * vmalloc area, this returns 0. 1937d0107eb0SKAMEZAWA Hiroyuki * 1938d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 1939d0107eb0SKAMEZAWA Hiroyuki * copies data from a buffer to the given addr. If the specified range of 1940d0107eb0SKAMEZAWA Hiroyuki * [addr...addr+count) includes some valid address, data is copied from 1941d0107eb0SKAMEZAWA Hiroyuki * the proper area of @buf. If there are memory holes, nothing is copied into them.
1942d0107eb0SKAMEZAWA Hiroyuki * An IOREMAP area is treated as a memory hole and no copy is done. 1943d0107eb0SKAMEZAWA Hiroyuki * 1944d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a live 1945d0107eb0SKAMEZAWA Hiroyuki * vm_struct area, this returns 0. 1946d0107eb0SKAMEZAWA Hiroyuki * @buf should be a kernel buffer. Because this function uses KM_USER0, 1947d0107eb0SKAMEZAWA Hiroyuki * the caller should guarantee that KM_USER0 is not used. 1948d0107eb0SKAMEZAWA Hiroyuki * 1949d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vwrite() is never necessary because the caller 1950d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy(). 1951d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access the vmalloc area without 1952d0107eb0SKAMEZAWA Hiroyuki * any prior information, such as /dev/kmem. 1953d0107eb0SKAMEZAWA Hiroyuki * 1954d0107eb0SKAMEZAWA Hiroyuki * The caller should guarantee KM_USER1 is not used. 1955d0107eb0SKAMEZAWA Hiroyuki */ 1956d0107eb0SKAMEZAWA Hiroyuki 19571da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count) 19581da177e4SLinus Torvalds { 19591da177e4SLinus Torvalds struct vm_struct *tmp; 1960d0107eb0SKAMEZAWA Hiroyuki char *vaddr; 1961d0107eb0SKAMEZAWA Hiroyuki unsigned long n, buflen; 1962d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 19631da177e4SLinus Torvalds 19641da177e4SLinus Torvalds /* Don't allow overflow */ 19651da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 19661da177e4SLinus Torvalds count = -(unsigned long) addr; 1967d0107eb0SKAMEZAWA Hiroyuki buflen = count; 19681da177e4SLinus Torvalds 19691da177e4SLinus Torvalds read_lock(&vmlist_lock); 1970d0107eb0SKAMEZAWA Hiroyuki for (tmp = vmlist; count && tmp; tmp = tmp->next) { 19711da177e4SLinus Torvalds vaddr = (char *) tmp->addr; 19721da177e4SLinus Torvalds if (addr >= vaddr + tmp->size - PAGE_SIZE) 19731da177e4SLinus Torvalds continue; 19741da177e4SLinus Torvalds while (addr < vaddr) { 19751da177e4SLinus Torvalds if (count == 0) 19761da177e4SLinus Torvalds goto finished; 19771da177e4SLinus Torvalds buf++; 19781da177e4SLinus Torvalds addr++; 19791da177e4SLinus Torvalds count--; 19801da177e4SLinus Torvalds } 19811da177e4SLinus Torvalds n = vaddr + tmp->size - PAGE_SIZE - addr; 1982d0107eb0SKAMEZAWA Hiroyuki if (n > count) 1983d0107eb0SKAMEZAWA Hiroyuki n = count; 1984d0107eb0SKAMEZAWA Hiroyuki if (!(tmp->flags & VM_IOREMAP)) { 1985d0107eb0SKAMEZAWA Hiroyuki aligned_vwrite(buf, addr, n); 1986d0107eb0SKAMEZAWA Hiroyuki copied++; 1987d0107eb0SKAMEZAWA Hiroyuki } 1988d0107eb0SKAMEZAWA Hiroyuki buf += n; 1989d0107eb0SKAMEZAWA Hiroyuki addr += n; 1990d0107eb0SKAMEZAWA Hiroyuki count -= n; 19911da177e4SLinus Torvalds } 19921da177e4SLinus Torvalds finished: 19931da177e4SLinus Torvalds read_unlock(&vmlist_lock); 1994d0107eb0SKAMEZAWA Hiroyuki if (!copied) 1995d0107eb0SKAMEZAWA Hiroyuki return 0; 1996d0107eb0SKAMEZAWA Hiroyuki return buflen; 19971da177e4SLinus Torvalds } 199883342314SNick Piggin 199983342314SNick Piggin /** 200083342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 200183342314SNick Piggin * @vma: vma to cover (map full range of vma) 200283342314SNick Piggin * @addr: vmalloc memory 200383342314SNick Piggin * @pgoff: number of pages into addr before first page to map 20047682486bSRandy Dunlap * 20057682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 200683342314SNick Piggin * 200783342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 200883342314SNick
Piggin * that it is big enough to cover the vma. Will return failure if 200983342314SNick Piggin * that criterion isn't met. 201083342314SNick Piggin * 201172fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 201283342314SNick Piggin */ 201383342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 201483342314SNick Piggin unsigned long pgoff) 201583342314SNick Piggin { 201683342314SNick Piggin struct vm_struct *area; 201783342314SNick Piggin unsigned long uaddr = vma->vm_start; 201883342314SNick Piggin unsigned long usize = vma->vm_end - vma->vm_start; 201983342314SNick Piggin 202083342314SNick Piggin if ((PAGE_SIZE-1) & (unsigned long)addr) 202183342314SNick Piggin return -EINVAL; 202283342314SNick Piggin 2023db64fe02SNick Piggin area = find_vm_area(addr); 202483342314SNick Piggin if (!area) 2025db64fe02SNick Piggin return -EINVAL; 202683342314SNick Piggin 202783342314SNick Piggin if (!(area->flags & VM_USERMAP)) 2028db64fe02SNick Piggin return -EINVAL; 202983342314SNick Piggin 203083342314SNick Piggin if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE) 2031db64fe02SNick Piggin return -EINVAL; 203283342314SNick Piggin 203383342314SNick Piggin addr += pgoff << PAGE_SHIFT; 203483342314SNick Piggin do { 203583342314SNick Piggin struct page *page = vmalloc_to_page(addr); 2036db64fe02SNick Piggin int ret; 2037db64fe02SNick Piggin 203883342314SNick Piggin ret = vm_insert_page(vma, uaddr, page); 203983342314SNick Piggin if (ret) 204083342314SNick Piggin return ret; 204183342314SNick Piggin 204283342314SNick Piggin uaddr += PAGE_SIZE; 204383342314SNick Piggin addr += PAGE_SIZE; 204483342314SNick Piggin usize -= PAGE_SIZE; 204583342314SNick Piggin } while (usize > 0); 204683342314SNick Piggin 204783342314SNick Piggin /* Prevent "things" like memory migration? VM_flags need a cleanup... */ 204883342314SNick Piggin vma->vm_flags |= VM_RESERVED; 204983342314SNick Piggin 2050db64fe02SNick Piggin return 0; 205183342314SNick Piggin } 205283342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 205383342314SNick Piggin 20541eeb66a1SChristoph Hellwig /* 20551eeb66a1SChristoph Hellwig * Implement a stub for vmalloc_sync_all() if the architecture chose not to 20561eeb66a1SChristoph Hellwig * have one. 20571eeb66a1SChristoph Hellwig */ 20581eeb66a1SChristoph Hellwig void __attribute__((weak)) vmalloc_sync_all(void) 20591eeb66a1SChristoph Hellwig { 20601eeb66a1SChristoph Hellwig } 20615f4352fbSJeremy Fitzhardinge 20625f4352fbSJeremy Fitzhardinge 20632f569afdSMartin Schwidefsky static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 20645f4352fbSJeremy Fitzhardinge { 20655f4352fbSJeremy Fitzhardinge /* apply_to_page_range() does all the hard work. */ 20665f4352fbSJeremy Fitzhardinge return 0; 20675f4352fbSJeremy Fitzhardinge } 20685f4352fbSJeremy Fitzhardinge 20695f4352fbSJeremy Fitzhardinge /** 20705f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 20715f4352fbSJeremy Fitzhardinge * @size: size of the area 20727682486bSRandy Dunlap * 20737682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 20745f4352fbSJeremy Fitzhardinge * 20755f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 20765f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 20775f4352fbSJeremy Fitzhardinge * are created.
If the kernel address space is not shared 20785f4352fbSJeremy Fitzhardinge * between processes, it syncs the pagetable across all 20795f4352fbSJeremy Fitzhardinge * processes. 20805f4352fbSJeremy Fitzhardinge */ 20815f4352fbSJeremy Fitzhardinge struct vm_struct *alloc_vm_area(size_t size) 20825f4352fbSJeremy Fitzhardinge { 20835f4352fbSJeremy Fitzhardinge struct vm_struct *area; 20845f4352fbSJeremy Fitzhardinge 208523016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 208623016969SChristoph Lameter __builtin_return_address(0)); 20875f4352fbSJeremy Fitzhardinge if (area == NULL) 20885f4352fbSJeremy Fitzhardinge return NULL; 20895f4352fbSJeremy Fitzhardinge 20905f4352fbSJeremy Fitzhardinge /* 20915f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 20925f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm. 20935f4352fbSJeremy Fitzhardinge */ 20945f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 20955f4352fbSJeremy Fitzhardinge area->size, f, NULL)) { 20965f4352fbSJeremy Fitzhardinge free_vm_area(area); 20975f4352fbSJeremy Fitzhardinge return NULL; 20985f4352fbSJeremy Fitzhardinge } 20995f4352fbSJeremy Fitzhardinge 21005f4352fbSJeremy Fitzhardinge /* Make sure the pagetables are constructed in process kernel 21015f4352fbSJeremy Fitzhardinge mappings */ 21025f4352fbSJeremy Fitzhardinge vmalloc_sync_all(); 21035f4352fbSJeremy Fitzhardinge 21045f4352fbSJeremy Fitzhardinge return area; 21055f4352fbSJeremy Fitzhardinge } 21065f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 21075f4352fbSJeremy Fitzhardinge 21085f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 21095f4352fbSJeremy Fitzhardinge { 21105f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 21115f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 21125f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 21135f4352fbSJeremy Fitzhardinge kfree(area); 21145f4352fbSJeremy Fitzhardinge } 21155f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 2116a10aa579SChristoph Lameter 21174f8b02b4STejun Heo #ifdef CONFIG_SMP 2118ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 2119ca23e405STejun Heo { 2120ca23e405STejun Heo return n ? rb_entry(n, struct vmap_area, rb_node) : NULL; 2121ca23e405STejun Heo } 2122ca23e405STejun Heo 2123ca23e405STejun Heo /** 2124ca23e405STejun Heo * pvm_find_next_prev - find the next and prev vmap_area surrounding @end 2125ca23e405STejun Heo * @end: target address 2126ca23e405STejun Heo * @pnext: out arg for the next vmap_area 2127ca23e405STejun Heo * @pprev: out arg for the previous vmap_area 2128ca23e405STejun Heo * 2129ca23e405STejun Heo * Returns: %true if either or both of next and prev are found, 2130ca23e405STejun Heo * %false if no vmap_area exists 2131ca23e405STejun Heo * 2132ca23e405STejun Heo * Find the vmap_areas whose end addresses enclose @end, i.e. if not 2133ca23e405STejun Heo * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
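 *
 * For example (illustrative): with vmap_areas ending at 4M and 8M,
 * looking up @end == 6M yields *pprev ending at 4M and *pnext ending
 * at 8M, while @end == 8M yields *pprev ending at 8M and *pnext being
 * the next area above it, if any.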
2134ca23e405STejun Heo */ 2135ca23e405STejun Heo static bool pvm_find_next_prev(unsigned long end, 2136ca23e405STejun Heo struct vmap_area **pnext, 2137ca23e405STejun Heo struct vmap_area **pprev) 2138ca23e405STejun Heo { 2139ca23e405STejun Heo struct rb_node *n = vmap_area_root.rb_node; 2140ca23e405STejun Heo struct vmap_area *va = NULL; 2141ca23e405STejun Heo 2142ca23e405STejun Heo while (n) { 2143ca23e405STejun Heo va = rb_entry(n, struct vmap_area, rb_node); 2144ca23e405STejun Heo if (end < va->va_end) 2145ca23e405STejun Heo n = n->rb_left; 2146ca23e405STejun Heo else if (end > va->va_end) 2147ca23e405STejun Heo n = n->rb_right; 2148ca23e405STejun Heo else 2149ca23e405STejun Heo break; 2150ca23e405STejun Heo } 2151ca23e405STejun Heo 2152ca23e405STejun Heo if (!va) 2153ca23e405STejun Heo return false; 2154ca23e405STejun Heo 2155ca23e405STejun Heo if (va->va_end > end) { 2156ca23e405STejun Heo *pnext = va; 2157ca23e405STejun Heo *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2158ca23e405STejun Heo } else { 2159ca23e405STejun Heo *pprev = va; 2160ca23e405STejun Heo *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); 2161ca23e405STejun Heo } 2162ca23e405STejun Heo return true; 2163ca23e405STejun Heo } 2164ca23e405STejun Heo 2165ca23e405STejun Heo /** 2166ca23e405STejun Heo * pvm_determine_end - find the highest aligned address between two vmap_areas 2167ca23e405STejun Heo * @pnext: in/out arg for the next vmap_area 2168ca23e405STejun Heo * @pprev: in/out arg for the previous vmap_area 2169ca23e405STejun Heo * @align: alignment 2170ca23e405STejun Heo * 2171ca23e405STejun Heo * Returns: determined end address 2172ca23e405STejun Heo * 2173ca23e405STejun Heo * Find the highest aligned address between *@pnext and *@pprev below 2174ca23e405STejun Heo * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned 2175ca23e405STejun Heo * down address is between the end addresses of the two vmap_areas. 2176ca23e405STejun Heo * 2177ca23e405STejun Heo * Please note that the address returned by this function may fall 2178ca23e405STejun Heo * inside *@pnext vmap_area. The caller is responsible for checking 2179ca23e405STejun Heo * that. 
2180ca23e405STejun Heo */ 2181ca23e405STejun Heo static unsigned long pvm_determine_end(struct vmap_area **pnext, 2182ca23e405STejun Heo struct vmap_area **pprev, 2183ca23e405STejun Heo unsigned long align) 2184ca23e405STejun Heo { 2185ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 2186ca23e405STejun Heo unsigned long addr; 2187ca23e405STejun Heo 2188ca23e405STejun Heo if (*pnext) 2189ca23e405STejun Heo addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); 2190ca23e405STejun Heo else 2191ca23e405STejun Heo addr = vmalloc_end; 2192ca23e405STejun Heo 2193ca23e405STejun Heo while (*pprev && (*pprev)->va_end > addr) { 2194ca23e405STejun Heo *pnext = *pprev; 2195ca23e405STejun Heo *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2196ca23e405STejun Heo } 2197ca23e405STejun Heo 2198ca23e405STejun Heo return addr; 2199ca23e405STejun Heo } 2200ca23e405STejun Heo 2201ca23e405STejun Heo /** 2202ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 2203ca23e405STejun Heo * @offsets: array containing offset of each area 2204ca23e405STejun Heo * @sizes: array containing size of each area 2205ca23e405STejun Heo * @nr_vms: the number of areas to allocate 2206ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this 2207ca23e405STejun Heo * 2208ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated 2209ca23e405STejun Heo * vm_structs on success, %NULL on failure 2210ca23e405STejun Heo * 2211ca23e405STejun Heo * Percpu allocator wants to use congruent vm areas so that it can 2212ca23e405STejun Heo * maintain the offsets among percpu areas. This function allocates 2213ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 2214ec3f64fcSDavid Rientjes * be scattered pretty far, distance between two areas easily going up 2215ec3f64fcSDavid Rientjes * to gigabytes. To avoid interacting with regular vmallocs, these 2216ec3f64fcSDavid Rientjes * areas are allocated from top. 2217ca23e405STejun Heo * 2218ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It 2219ca23e405STejun Heo * does everything top-down and scans areas from the end looking for 2220ca23e405STejun Heo * matching slot. While scanning, if any of the areas overlaps with 2221ca23e405STejun Heo * existing vmap_area, the base address is pulled down to fit the 2222ca23e405STejun Heo * area. Scanning is repeated until all the areas fit and then all 2223ca23e405STejun Heo * necessary data structures are inserted and the result is returned.
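 *
 * An illustrative sketch (not part of the original source): two
 * congruent 64k areas whose bases are kept exactly 1M apart:
 *
 *	const unsigned long offsets[] = { 0, 1UL << 20 };
 *	const size_t sizes[] = { 64 << 10, 64 << 10 };
 *	struct vm_struct **vms =
 *		pcpu_get_vm_areas(offsets, sizes, 2, 64 << 10);
 *
 * On success vms[1]->addr sits exactly 1M above vms[0]->addr.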
2224ca23e405STejun Heo */ 2225ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 2226ca23e405STejun Heo const size_t *sizes, int nr_vms, 2227ec3f64fcSDavid Rientjes size_t align) 2228ca23e405STejun Heo { 2229ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 2230ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 2231ca23e405STejun Heo struct vmap_area **vas, *prev, *next; 2232ca23e405STejun Heo struct vm_struct **vms; 2233ca23e405STejun Heo int area, area2, last_area, term_area; 2234ca23e405STejun Heo unsigned long base, start, end, last_end; 2235ca23e405STejun Heo bool purged = false; 2236ca23e405STejun Heo 2237ca23e405STejun Heo /* verify parameters and allocate data structures */ 2238ca23e405STejun Heo BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align)); 2239ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 2240ca23e405STejun Heo start = offsets[area]; 2241ca23e405STejun Heo end = start + sizes[area]; 2242ca23e405STejun Heo 2243ca23e405STejun Heo /* is everything aligned properly? */ 2244ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 2245ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 2246ca23e405STejun Heo 2247ca23e405STejun Heo /* detect the area with the highest address */ 2248ca23e405STejun Heo if (start > offsets[last_area]) 2249ca23e405STejun Heo last_area = area; 2250ca23e405STejun Heo 2251ca23e405STejun Heo for (area2 = 0; area2 < nr_vms; area2++) { 2252ca23e405STejun Heo unsigned long start2 = offsets[area2]; 2253ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 2254ca23e405STejun Heo 2255ca23e405STejun Heo if (area2 == area) 2256ca23e405STejun Heo continue; 2257ca23e405STejun Heo 2258ca23e405STejun Heo BUG_ON(start2 >= start && start2 < end); 2259ca23e405STejun Heo BUG_ON(end2 <= end && end2 > start); 2260ca23e405STejun Heo } 2261ca23e405STejun Heo } 2262ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 2263ca23e405STejun Heo 2264ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 2265ca23e405STejun Heo WARN_ON(true); 2266ca23e405STejun Heo return NULL; 2267ca23e405STejun Heo } 2268ca23e405STejun Heo 2269ec3f64fcSDavid Rientjes vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL); 2270ec3f64fcSDavid Rientjes vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL); 2271ca23e405STejun Heo if (!vas || !vms) 2272ca23e405STejun Heo goto err_free; 2273ca23e405STejun Heo 2274ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 2275ec3f64fcSDavid Rientjes vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); 2276ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 2277ca23e405STejun Heo if (!vas[area] || !vms[area]) 2278ca23e405STejun Heo goto err_free; 2279ca23e405STejun Heo } 2280ca23e405STejun Heo retry: 2281ca23e405STejun Heo spin_lock(&vmap_area_lock); 2282ca23e405STejun Heo 2283ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 2284ca23e405STejun Heo area = term_area = last_area; 2285ca23e405STejun Heo start = offsets[area]; 2286ca23e405STejun Heo end = start + sizes[area]; 2287ca23e405STejun Heo 2288ca23e405STejun Heo if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { 2289ca23e405STejun Heo base = vmalloc_end - last_end; 2290ca23e405STejun Heo goto found; 2291ca23e405STejun Heo } 2292ca23e405STejun Heo base = pvm_determine_end(&next, &prev, align) - end; 2293ca23e405STejun Heo 2294ca23e405STejun Heo while 
	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start) {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
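		/*
		 * (area + nr_vms - 1) % nr_vms steps backwards through
		 * the areas circularly; e.g. with nr_vms == 3 and
		 * last_area == 1 the visit order is 1 -> 0 -> 2, and
		 * arriving back at term_area means every area was seen
		 * to fit against the current base.
		 */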
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				  pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas)
			kfree(vas[area]);
		if (vms)
			kfree(vms[area]);
	}
	kfree(vas);
	kfree(vms);
	return NULL;
}
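
/*
 * A minimal usage sketch (illustrative only, not taken from the percpu
 * allocator): reserve two areas 1MB apart in the vmalloc space and
 * release them again.  The function name is hypothetical and the block
 * is guarded out so it is never compiled.
 */
#if 0
static int pcpu_vm_areas_example(void)
{
	const unsigned long offsets[] = { 0, 1UL << 20 };
	const size_t sizes[] = { PAGE_SIZE, PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), PAGE_SIZE);
	if (!vms)
		return -ENOMEM;

	/* vms[i]->addr now points at the reserved (not yet mapped) area */
	pcpu_free_vm_areas(vms, ARRAY_SIZE(sizes));
	return 0;
}
#endif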
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmlist_lock)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmlist_lock)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}
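
/*
 * s_show() emits one line per vm_struct.  An illustrative (made-up)
 * /proc/vmallocinfo line with NUMA info enabled might look like:
 *
 *	0xffffc90000002000-0xffffc90000005000   12288 some_caller+0x5a/0x70 pages=2 vmalloc N0=2
 *
 * where the size includes the guard page, so pages can be one less
 * than size / PAGE_SIZE.
 */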
" phys=%llx", (unsigned long long)v->phys_addr); 2465a10aa579SChristoph Lameter 2466a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 2467a10aa579SChristoph Lameter seq_printf(m, " ioremap"); 2468a10aa579SChristoph Lameter 2469a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 2470a10aa579SChristoph Lameter seq_printf(m, " vmalloc"); 2471a10aa579SChristoph Lameter 2472a10aa579SChristoph Lameter if (v->flags & VM_MAP) 2473a10aa579SChristoph Lameter seq_printf(m, " vmap"); 2474a10aa579SChristoph Lameter 2475a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 2476a10aa579SChristoph Lameter seq_printf(m, " user"); 2477a10aa579SChristoph Lameter 2478a10aa579SChristoph Lameter if (v->flags & VM_VPAGES) 2479a10aa579SChristoph Lameter seq_printf(m, " vpages"); 2480a10aa579SChristoph Lameter 2481a47a126aSEric Dumazet show_numa_info(m, v); 2482a10aa579SChristoph Lameter seq_putc(m, '\n'); 2483a10aa579SChristoph Lameter return 0; 2484a10aa579SChristoph Lameter } 2485a10aa579SChristoph Lameter 24865f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = { 2487a10aa579SChristoph Lameter .start = s_start, 2488a10aa579SChristoph Lameter .next = s_next, 2489a10aa579SChristoph Lameter .stop = s_stop, 2490a10aa579SChristoph Lameter .show = s_show, 2491a10aa579SChristoph Lameter }; 24925f6a6a9cSAlexey Dobriyan 24935f6a6a9cSAlexey Dobriyan static int vmalloc_open(struct inode *inode, struct file *file) 24945f6a6a9cSAlexey Dobriyan { 24955f6a6a9cSAlexey Dobriyan unsigned int *ptr = NULL; 24965f6a6a9cSAlexey Dobriyan int ret; 24975f6a6a9cSAlexey Dobriyan 249851980ac9SKulikov Vasiliy if (NUMA_BUILD) { 24995f6a6a9cSAlexey Dobriyan ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); 250051980ac9SKulikov Vasiliy if (ptr == NULL) 250151980ac9SKulikov Vasiliy return -ENOMEM; 250251980ac9SKulikov Vasiliy } 25035f6a6a9cSAlexey Dobriyan ret = seq_open(file, &vmalloc_op); 25045f6a6a9cSAlexey Dobriyan if (!ret) { 25055f6a6a9cSAlexey Dobriyan struct seq_file *m = file->private_data; 25065f6a6a9cSAlexey Dobriyan m->private = ptr; 25075f6a6a9cSAlexey Dobriyan } else 25085f6a6a9cSAlexey Dobriyan kfree(ptr); 25095f6a6a9cSAlexey Dobriyan return ret; 25105f6a6a9cSAlexey Dobriyan } 25115f6a6a9cSAlexey Dobriyan 25125f6a6a9cSAlexey Dobriyan static const struct file_operations proc_vmalloc_operations = { 25135f6a6a9cSAlexey Dobriyan .open = vmalloc_open, 25145f6a6a9cSAlexey Dobriyan .read = seq_read, 25155f6a6a9cSAlexey Dobriyan .llseek = seq_lseek, 25165f6a6a9cSAlexey Dobriyan .release = seq_release_private, 25175f6a6a9cSAlexey Dobriyan }; 25185f6a6a9cSAlexey Dobriyan 25195f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 25205f6a6a9cSAlexey Dobriyan { 25215f6a6a9cSAlexey Dobriyan proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); 25225f6a6a9cSAlexey Dobriyan return 0; 25235f6a6a9cSAlexey Dobriyan } 25245f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 2525a10aa579SChristoph Lameter #endif 2526a10aa579SChristoph Lameter 2527