/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>


/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	if (unlikely(err))
		return err;
	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
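/*
 * Usage sketch (illustrative only, assuming the vmalloc() call
 * succeeds): translating a vmalloc address back to its backing page
 * and page frame number:
 *
 *	void *buf = vmalloc(2 * PAGE_SIZE);
 *	struct page *page = vmalloc_to_page(buf);
 *	unsigned long pfn = vmalloc_to_pfn(buf + PAGE_SIZE);
 */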
/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);
static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	if (addr + size - 1 < addr)
		goto overflow;

	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);
			if (addr + size - 1 < addr)
				goto overflow;

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
overflow:
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		kfree(va);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}
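/*
 * Illustrative use: new_vmap_block() below carves one aligned,
 * VMAP_BLOCK_SIZE-sized block out of the vmalloc area this way:
 *
 *	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 *			     VMALLOC_START, VMALLOC_END, node, gfp_mask);
 *	if (IS_ERR(va))
 *		return ERR_PTR(PTR_ERR(va));
 */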
static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}
static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (unlike the debugging of the linear
	 * kernel virtual address space, which doesn't need a broadcast
	 * TLB flush and so is a lot faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
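/*
 * Worked example (illustrative): with 8 CPUs online, fls(8) == 4, so
 * up to 4 * 32MB == 128MB of lazily freed KVA (32768 pages with 4K
 * pages) can accumulate before a purge and its global TLB flush.
 */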
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}
/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct list_head free_list;
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */
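/*
 * Worked sizing example (illustrative): 32-bit, 4K pages, NR_CPUS == 4.
 * VMALLOC_SPACE is guessed at 128MB, so VMALLOC_PAGES == 32768 and
 * VMALLOC_PAGES / NR_CPUS / 16 == 512.  Clamping that to the range
 * [VMAP_BBMAP_BITS_MIN, VMAP_BBMAP_BITS_MAX] == [64, 1024] leaves
 * VMAP_BBMAP_BITS == 512, i.e. VMAP_BLOCK_SIZE == 2MB per block.
 */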
static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	BUG_ON(!list_empty(&vb->free_list));

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}
static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
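/*
 * Usage sketch (illustrative; the set_memory_uc() caller is a
 * hypothetical, x86-specific example): code about to change the
 * attributes of pages it owns can make sure no lazily kept vmap
 * aliases remain first:
 *
 *	vm_unmap_aliases();
 *	set_memory_uc((unsigned long)page_address(page), 1);
 */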
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
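/*
 * Usage sketch (illustrative; page allocation failure handling is
 * elided): map a small batch of pages and tear the mapping down again
 * with the matching count:
 *
 *	struct page *pages[4];
 *	void *addr;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	addr = vm_map_ram(pages, 4, -1, PAGE_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	vm_unmap_ram(addr, 4);
 */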
/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm->next = vmlist;
	vmlist = vm;
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}
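/*
 * Usage sketch (illustrative; "early_vm" and "size" are hypothetical):
 * boot code can pre-reserve a slice of the vmalloc area like this,
 * before vmalloc_init() runs:
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.flags = VM_ALLOC;
 *	early_vm.size = size;
 *	vm_area_register_early(&early_vm, PAGE_SIZE);
 *
 * early_vm.addr then holds the reserved address.
 */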
/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
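/*
 * Usage sketch (illustrative): the cache/TLB maintenance that the
 * _noflush variants leave to the caller, mirroring what
 * vmap_page_range() and unmap_kernel_range() do internally:
 *
 *	map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
 *	flush_cache_vmap(addr, addr + size);
 *	...
 *	flush_cache_vunmap(addr, addr + size);
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 */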
/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, void *caller)
{
	struct vm_struct *tmp, **p;

	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->private = vm;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr)
			break;
	}
	vm->next = *p;
	*p = vm;
	write_unlock(&vmlist_lock);
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;
GFP_RECLAIM_MASK, node);
11851da177e4SLinus Torvalds if (unlikely(!area))
11861da177e4SLinus Torvalds return NULL;
11871da177e4SLinus Torvalds 
11881da177e4SLinus Torvalds /*
11891da177e4SLinus Torvalds * We always allocate a guard page.
11901da177e4SLinus Torvalds */
11911da177e4SLinus Torvalds size += PAGE_SIZE;
11921da177e4SLinus Torvalds 
1193db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1194db64fe02SNick Piggin if (IS_ERR(va)) {
1195db64fe02SNick Piggin kfree(area);
1196db64fe02SNick Piggin return NULL;
11971da177e4SLinus Torvalds }
11981da177e4SLinus Torvalds 
1199cf88c790STejun Heo insert_vmalloc_vm(area, va, flags, caller);
12001da177e4SLinus Torvalds return area;
12011da177e4SLinus Torvalds }
12021da177e4SLinus Torvalds 
1203930fc45aSChristoph Lameter struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1204930fc45aSChristoph Lameter unsigned long start, unsigned long end)
1205930fc45aSChristoph Lameter {
120623016969SChristoph Lameter return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
120723016969SChristoph Lameter __builtin_return_address(0));
1208930fc45aSChristoph Lameter }
12095992b6daSRusty Russell EXPORT_SYMBOL_GPL(__get_vm_area);
1210930fc45aSChristoph Lameter 
1211c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1212c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end,
1213c2968612SBenjamin Herrenschmidt void *caller)
1214c2968612SBenjamin Herrenschmidt {
1215c2968612SBenjamin Herrenschmidt return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
1216c2968612SBenjamin Herrenschmidt caller);
1217c2968612SBenjamin Herrenschmidt }
1218c2968612SBenjamin Herrenschmidt 
12191da177e4SLinus Torvalds /**
1220183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area
12211da177e4SLinus Torvalds * @size: size of the area
12221da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or %VM_ALLOC
12231da177e4SLinus Torvalds *
12241da177e4SLinus Torvalds * Search an area of @size in the kernel virtual mapping area,
12251da177e4SLinus Torvalds * and reserve it for our purposes. Returns the area descriptor
12261da177e4SLinus Torvalds * on success or %NULL on failure.
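 *
 * Minimal usage sketch (teardown via remove_vm_area() plus kfree()
 * elided):
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(PAGE_SIZE, VM_IOREMAP);
 *	if (!area)
 *		return -ENOMEM;
 *	... establish a mapping at area->addr ...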
12271da177e4SLinus Torvalds */
12281da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
12291da177e4SLinus Torvalds {
123023016969SChristoph Lameter return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
123123016969SChristoph Lameter -1, GFP_KERNEL, __builtin_return_address(0));
123223016969SChristoph Lameter }
123323016969SChristoph Lameter 
123423016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
123523016969SChristoph Lameter void *caller)
123623016969SChristoph Lameter {
123723016969SChristoph Lameter return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
123823016969SChristoph Lameter -1, GFP_KERNEL, caller);
12391da177e4SLinus Torvalds }
12401da177e4SLinus Torvalds 
124152fd24caSGiridhar Pemmasani struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
124252fd24caSGiridhar Pemmasani int node, gfp_t gfp_mask)
1243930fc45aSChristoph Lameter {
124452fd24caSGiridhar Pemmasani return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
124523016969SChristoph Lameter gfp_mask, __builtin_return_address(0));
1246930fc45aSChristoph Lameter }
1247930fc45aSChristoph Lameter 
1248db64fe02SNick Piggin static struct vm_struct *find_vm_area(const void *addr)
124983342314SNick Piggin {
1250db64fe02SNick Piggin struct vmap_area *va;
125183342314SNick Piggin 
1252db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr);
1253db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA)
1254db64fe02SNick Piggin return va->private;
125583342314SNick Piggin 
12567856dfebSAndi Kleen return NULL;
12577856dfebSAndi Kleen }
12587856dfebSAndi Kleen 
12591da177e4SLinus Torvalds /**
1260183ff22bSSimon Arlott * remove_vm_area - find and remove a contiguous kernel virtual area
12611da177e4SLinus Torvalds * @addr: base address
12621da177e4SLinus Torvalds *
12631da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it.
12641da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe
12657856dfebSAndi Kleen * on SMP machines, except for its size or flags.
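 *
 * A teardown sketch, mirroring what free_vm_area() later in this file
 * does:
 *
 *	struct vm_struct *vm = remove_vm_area(addr);
 *	BUG_ON(!vm);
 *	kfree(vm);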
12661da177e4SLinus Torvalds */
1267b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr)
12681da177e4SLinus Torvalds {
1269db64fe02SNick Piggin struct vmap_area *va;
1270db64fe02SNick Piggin 
1271db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr);
1272db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) {
1273db64fe02SNick Piggin struct vm_struct *vm = va->private;
1274db64fe02SNick Piggin struct vm_struct *tmp, **p;
1275cd52858cSNick Piggin 
1276cd52858cSNick Piggin vmap_debug_free_range(va->va_start, va->va_end);
1277db64fe02SNick Piggin free_unmap_vmap_area(va);
1278db64fe02SNick Piggin vm->size -= PAGE_SIZE;
1279db64fe02SNick Piggin 
12801da177e4SLinus Torvalds write_lock(&vmlist_lock);
1281db64fe02SNick Piggin for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
1282db64fe02SNick Piggin ;
1283db64fe02SNick Piggin *p = tmp->next;
12841da177e4SLinus Torvalds write_unlock(&vmlist_lock);
1285db64fe02SNick Piggin 
1286db64fe02SNick Piggin return vm;
1287db64fe02SNick Piggin }
1288db64fe02SNick Piggin return NULL;
12891da177e4SLinus Torvalds }
12901da177e4SLinus Torvalds 
1291b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages)
12921da177e4SLinus Torvalds {
12931da177e4SLinus Torvalds struct vm_struct *area;
12941da177e4SLinus Torvalds 
12951da177e4SLinus Torvalds if (!addr)
12961da177e4SLinus Torvalds return;
12971da177e4SLinus Torvalds 
12981da177e4SLinus Torvalds if ((PAGE_SIZE-1) & (unsigned long)addr) {
12994c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
13001da177e4SLinus Torvalds return;
13011da177e4SLinus Torvalds }
13021da177e4SLinus Torvalds 
13031da177e4SLinus Torvalds area = remove_vm_area(addr);
13041da177e4SLinus Torvalds if (unlikely(!area)) {
13054c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
13061da177e4SLinus Torvalds addr);
13071da177e4SLinus Torvalds return;
13081da177e4SLinus Torvalds }
13091da177e4SLinus Torvalds 
13109a11b49aSIngo Molnar debug_check_no_locks_freed(addr, area->size);
13113ac7fe5aSThomas Gleixner debug_check_no_obj_freed(addr, area->size);
13129a11b49aSIngo Molnar 
13131da177e4SLinus Torvalds if (deallocate_pages) {
13141da177e4SLinus Torvalds int i;
13151da177e4SLinus Torvalds 
13161da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) {
1317bf53d6f8SChristoph Lameter struct page *page = area->pages[i];
1318bf53d6f8SChristoph Lameter 
1319bf53d6f8SChristoph Lameter BUG_ON(!page);
1320bf53d6f8SChristoph Lameter __free_page(page);
13211da177e4SLinus Torvalds }
13221da177e4SLinus Torvalds 
13238757d5faSJan Kiszka if (area->flags & VM_VPAGES)
13241da177e4SLinus Torvalds vfree(area->pages);
13251da177e4SLinus Torvalds else
13261da177e4SLinus Torvalds kfree(area->pages);
13271da177e4SLinus Torvalds }
13281da177e4SLinus Torvalds 
13291da177e4SLinus Torvalds kfree(area);
13301da177e4SLinus Torvalds return;
13311da177e4SLinus Torvalds }
13321da177e4SLinus Torvalds 
13331da177e4SLinus Torvalds /**
13341da177e4SLinus Torvalds * vfree - release memory allocated by vmalloc()
13351da177e4SLinus Torvalds * @addr: memory base address
13361da177e4SLinus Torvalds *
1337183ff22bSSimon Arlott * Free the virtually contiguous memory area starting at @addr, as
133880e93effSPekka Enberg * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
133980e93effSPekka Enberg * NULL, no operation is performed.
13401da177e4SLinus Torvalds *
134180e93effSPekka Enberg * Must not be called in interrupt context.
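 *
 * The usual allocate/free pairing, as a sketch:
 *
 *	buf = vmalloc(len);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);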
13421da177e4SLinus Torvalds */ 1343b3bdda02SChristoph Lameter void vfree(const void *addr) 13441da177e4SLinus Torvalds { 13451da177e4SLinus Torvalds BUG_ON(in_interrupt()); 134689219d37SCatalin Marinas 134789219d37SCatalin Marinas kmemleak_free(addr); 134889219d37SCatalin Marinas 13491da177e4SLinus Torvalds __vunmap(addr, 1); 13501da177e4SLinus Torvalds } 13511da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 13521da177e4SLinus Torvalds 13531da177e4SLinus Torvalds /** 13541da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 13551da177e4SLinus Torvalds * @addr: memory base address 13561da177e4SLinus Torvalds * 13571da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 13581da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 13591da177e4SLinus Torvalds * 136080e93effSPekka Enberg * Must not be called in interrupt context. 13611da177e4SLinus Torvalds */ 1362b3bdda02SChristoph Lameter void vunmap(const void *addr) 13631da177e4SLinus Torvalds { 13641da177e4SLinus Torvalds BUG_ON(in_interrupt()); 136534754b69SPeter Zijlstra might_sleep(); 13661da177e4SLinus Torvalds __vunmap(addr, 0); 13671da177e4SLinus Torvalds } 13681da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 13691da177e4SLinus Torvalds 13701da177e4SLinus Torvalds /** 13711da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 13721da177e4SLinus Torvalds * @pages: array of page pointers 13731da177e4SLinus Torvalds * @count: number of pages to map 13741da177e4SLinus Torvalds * @flags: vm_area->flags 13751da177e4SLinus Torvalds * @prot: page protection for the mapping 13761da177e4SLinus Torvalds * 13771da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 13781da177e4SLinus Torvalds * space. 
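 *
 * Usage sketch, assuming the caller already owns @count filled-in page
 * pointers (e.g. from alloc_page()):
 *
 *	void *va = vmap(pages, count, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);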
13791da177e4SLinus Torvalds */ 13801da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 13811da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 13821da177e4SLinus Torvalds { 13831da177e4SLinus Torvalds struct vm_struct *area; 13841da177e4SLinus Torvalds 138534754b69SPeter Zijlstra might_sleep(); 138634754b69SPeter Zijlstra 13871da177e4SLinus Torvalds if (count > num_physpages) 13881da177e4SLinus Torvalds return NULL; 13891da177e4SLinus Torvalds 139023016969SChristoph Lameter area = get_vm_area_caller((count << PAGE_SHIFT), flags, 139123016969SChristoph Lameter __builtin_return_address(0)); 13921da177e4SLinus Torvalds if (!area) 13931da177e4SLinus Torvalds return NULL; 139423016969SChristoph Lameter 13951da177e4SLinus Torvalds if (map_vm_area(area, prot, &pages)) { 13961da177e4SLinus Torvalds vunmap(area->addr); 13971da177e4SLinus Torvalds return NULL; 13981da177e4SLinus Torvalds } 13991da177e4SLinus Torvalds 14001da177e4SLinus Torvalds return area->addr; 14011da177e4SLinus Torvalds } 14021da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 14031da177e4SLinus Torvalds 1404db64fe02SNick Piggin static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, 1405db64fe02SNick Piggin int node, void *caller); 1406e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 140723016969SChristoph Lameter pgprot_t prot, int node, void *caller) 14081da177e4SLinus Torvalds { 14091da177e4SLinus Torvalds struct page **pages; 14101da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 14111da177e4SLinus Torvalds 14121da177e4SLinus Torvalds nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT; 14131da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 14141da177e4SLinus Torvalds 14151da177e4SLinus Torvalds area->nr_pages = nr_pages; 14161da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
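 * Each nested __vmalloc_node() call below only has to provide the
 * array of page pointers, which is smaller than the allocation that
 * needed it by a factor of roughly PAGE_SIZE / sizeof(struct page *),
 * so the nesting bottoms out after a couple of levels.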
*/ 14178757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 141894f6030cSChristoph Lameter pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO, 141923016969SChristoph Lameter PAGE_KERNEL, node, caller); 14208757d5faSJan Kiszka area->flags |= VM_VPAGES; 1421286e1ea3SAndrew Morton } else { 1422286e1ea3SAndrew Morton pages = kmalloc_node(array_size, 14236cb06229SChristoph Lameter (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO, 1424286e1ea3SAndrew Morton node); 1425286e1ea3SAndrew Morton } 14261da177e4SLinus Torvalds area->pages = pages; 142723016969SChristoph Lameter area->caller = caller; 14281da177e4SLinus Torvalds if (!area->pages) { 14291da177e4SLinus Torvalds remove_vm_area(area->addr); 14301da177e4SLinus Torvalds kfree(area); 14311da177e4SLinus Torvalds return NULL; 14321da177e4SLinus Torvalds } 14331da177e4SLinus Torvalds 14341da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1435bf53d6f8SChristoph Lameter struct page *page; 1436bf53d6f8SChristoph Lameter 1437930fc45aSChristoph Lameter if (node < 0) 1438bf53d6f8SChristoph Lameter page = alloc_page(gfp_mask); 1439930fc45aSChristoph Lameter else 1440bf53d6f8SChristoph Lameter page = alloc_pages_node(node, gfp_mask, 0); 1441bf53d6f8SChristoph Lameter 1442bf53d6f8SChristoph Lameter if (unlikely(!page)) { 14431da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 14441da177e4SLinus Torvalds area->nr_pages = i; 14451da177e4SLinus Torvalds goto fail; 14461da177e4SLinus Torvalds } 1447bf53d6f8SChristoph Lameter area->pages[i] = page; 14481da177e4SLinus Torvalds } 14491da177e4SLinus Torvalds 14501da177e4SLinus Torvalds if (map_vm_area(area, prot, &pages)) 14511da177e4SLinus Torvalds goto fail; 14521da177e4SLinus Torvalds return area->addr; 14531da177e4SLinus Torvalds 14541da177e4SLinus Torvalds fail: 14551da177e4SLinus Torvalds vfree(area->addr); 14561da177e4SLinus Torvalds return NULL; 14571da177e4SLinus Torvalds } 14581da177e4SLinus Torvalds 1459930fc45aSChristoph Lameter void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) 1460930fc45aSChristoph Lameter { 146189219d37SCatalin Marinas void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1, 146223016969SChristoph Lameter __builtin_return_address(0)); 146389219d37SCatalin Marinas 146489219d37SCatalin Marinas /* 146589219d37SCatalin Marinas * A ref_count = 3 is needed because the vm_struct and vmap_area 146689219d37SCatalin Marinas * structures allocated in the __get_vm_area_node() function contain 146789219d37SCatalin Marinas * references to the virtual address of the vmalloc'ed block. 146889219d37SCatalin Marinas */ 146989219d37SCatalin Marinas kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask); 147089219d37SCatalin Marinas 147189219d37SCatalin Marinas return addr; 1472930fc45aSChristoph Lameter } 1473930fc45aSChristoph Lameter 14741da177e4SLinus Torvalds /** 1475930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 14761da177e4SLinus Torvalds * @size: allocation size 14771da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 14781da177e4SLinus Torvalds * @prot: protection mask for the allocated pages 1479d44e0780SRandy Dunlap * @node: node to use for allocation or -1 1480c85d194bSRandy Dunlap * @caller: caller's return address 14811da177e4SLinus Torvalds * 14821da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 14831da177e4SLinus Torvalds * allocator with @gfp_mask flags. 
Map them into contiguous 14841da177e4SLinus Torvalds * kernel virtual space, using a pagetable protection of @prot. 14851da177e4SLinus Torvalds */ 1486b221385bSAdrian Bunk static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, 148723016969SChristoph Lameter int node, void *caller) 14881da177e4SLinus Torvalds { 14891da177e4SLinus Torvalds struct vm_struct *area; 149089219d37SCatalin Marinas void *addr; 149189219d37SCatalin Marinas unsigned long real_size = size; 14921da177e4SLinus Torvalds 14931da177e4SLinus Torvalds size = PAGE_ALIGN(size); 14941da177e4SLinus Torvalds if (!size || (size >> PAGE_SHIFT) > num_physpages) 14951da177e4SLinus Torvalds return NULL; 14961da177e4SLinus Torvalds 149723016969SChristoph Lameter area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END, 149823016969SChristoph Lameter node, gfp_mask, caller); 149923016969SChristoph Lameter 15001da177e4SLinus Torvalds if (!area) 15011da177e4SLinus Torvalds return NULL; 15021da177e4SLinus Torvalds 150389219d37SCatalin Marinas addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller); 150489219d37SCatalin Marinas 150589219d37SCatalin Marinas /* 150689219d37SCatalin Marinas * A ref_count = 3 is needed because the vm_struct and vmap_area 150789219d37SCatalin Marinas * structures allocated in the __get_vm_area_node() function contain 150889219d37SCatalin Marinas * references to the virtual address of the vmalloc'ed block. 150989219d37SCatalin Marinas */ 151089219d37SCatalin Marinas kmemleak_alloc(addr, real_size, 3, gfp_mask); 151189219d37SCatalin Marinas 151289219d37SCatalin Marinas return addr; 15131da177e4SLinus Torvalds } 15141da177e4SLinus Torvalds 1515930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 1516930fc45aSChristoph Lameter { 151723016969SChristoph Lameter return __vmalloc_node(size, gfp_mask, prot, -1, 151823016969SChristoph Lameter __builtin_return_address(0)); 1519930fc45aSChristoph Lameter } 15201da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 15211da177e4SLinus Torvalds 15221da177e4SLinus Torvalds /** 15231da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 15241da177e4SLinus Torvalds * @size: allocation size 15251da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 15261da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 15271da177e4SLinus Torvalds * 1528c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 15291da177e4SLinus Torvalds * use __vmalloc() instead. 15301da177e4SLinus Torvalds */ 15311da177e4SLinus Torvalds void *vmalloc(unsigned long size) 15321da177e4SLinus Torvalds { 153323016969SChristoph Lameter return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 153423016969SChristoph Lameter -1, __builtin_return_address(0)); 15351da177e4SLinus Torvalds } 15361da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 15371da177e4SLinus Torvalds 1538930fc45aSChristoph Lameter /** 1539ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 154083342314SNick Piggin * @size: allocation size 1541ead04089SRolf Eike Beer * 1542ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 1543ead04089SRolf Eike Beer * without leaking data. 
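 *
 * The area is flagged VM_USERMAP, so it can be handed to
 * remap_vmalloc_range() from an mmap handler; a sketch:
 *
 *	buf = vmalloc_user(len);
 *	if (!buf)
 *		return -ENOMEM;
 *	err = remap_vmalloc_range(vma, buf, 0);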
154483342314SNick Piggin */
154583342314SNick Piggin void *vmalloc_user(unsigned long size)
154683342314SNick Piggin {
154783342314SNick Piggin struct vm_struct *area;
154883342314SNick Piggin void *ret;
154983342314SNick Piggin 
155084877848SGlauber Costa ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
155184877848SGlauber Costa PAGE_KERNEL, -1, __builtin_return_address(0));
15522b4ac44eSEric Dumazet if (ret) {
1553db64fe02SNick Piggin area = find_vm_area(ret);
155483342314SNick Piggin area->flags |= VM_USERMAP;
15552b4ac44eSEric Dumazet }
155683342314SNick Piggin return ret;
155783342314SNick Piggin }
155883342314SNick Piggin EXPORT_SYMBOL(vmalloc_user);
155983342314SNick Piggin 
156083342314SNick Piggin /**
1561930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node
1562930fc45aSChristoph Lameter * @size: allocation size
1563d44e0780SRandy Dunlap * @node: numa node
1564930fc45aSChristoph Lameter *
1565930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level
1566930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space.
1567930fc45aSChristoph Lameter *
1568c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags
1569930fc45aSChristoph Lameter * use __vmalloc() instead.
1570930fc45aSChristoph Lameter */
1571930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node)
1572930fc45aSChristoph Lameter {
157323016969SChristoph Lameter return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
157423016969SChristoph Lameter node, __builtin_return_address(0));
1575930fc45aSChristoph Lameter }
1576930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node);
1577930fc45aSChristoph Lameter 
15784dc3b16bSPavel Pisa #ifndef PAGE_KERNEL_EXEC
15794dc3b16bSPavel Pisa # define PAGE_KERNEL_EXEC PAGE_KERNEL
15804dc3b16bSPavel Pisa #endif
15814dc3b16bSPavel Pisa 
15821da177e4SLinus Torvalds /**
15831da177e4SLinus Torvalds * vmalloc_exec - allocate virtually contiguous, executable memory
15841da177e4SLinus Torvalds * @size: allocation size
15851da177e4SLinus Torvalds *
15861da177e4SLinus Torvalds * Kernel-internal function to allocate enough pages to cover @size
15871da177e4SLinus Torvalds * from the page level allocator and map them into contiguous and
15881da177e4SLinus Torvalds * executable kernel virtual space.
15891da177e4SLinus Torvalds *
1590c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags
15911da177e4SLinus Torvalds * use __vmalloc() instead.
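 *
 * Modulo caller tracking, this is a sketch of the equivalent
 * __vmalloc() call:
 *
 *	__vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);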
15921da177e4SLinus Torvalds */
15931da177e4SLinus Torvalds 
15941da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size)
15951da177e4SLinus Torvalds {
159684877848SGlauber Costa return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
159784877848SGlauber Costa -1, __builtin_return_address(0));
15981da177e4SLinus Torvalds }
15991da177e4SLinus Torvalds 
16000d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
16017ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
16020d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
16037ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
16040d08e0d3SAndi Kleen #else
16050d08e0d3SAndi Kleen #define GFP_VMALLOC32 GFP_KERNEL
16060d08e0d3SAndi Kleen #endif
16070d08e0d3SAndi Kleen 
16081da177e4SLinus Torvalds /**
16091da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
16101da177e4SLinus Torvalds * @size: allocation size
16111da177e4SLinus Torvalds *
16121da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the
16131da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space.
16141da177e4SLinus Torvalds */
16151da177e4SLinus Torvalds void *vmalloc_32(unsigned long size)
16161da177e4SLinus Torvalds {
161784877848SGlauber Costa return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
161884877848SGlauber Costa -1, __builtin_return_address(0));
16191da177e4SLinus Torvalds }
16201da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32);
16211da177e4SLinus Torvalds 
162283342314SNick Piggin /**
1623ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
162483342314SNick Piggin * @size: allocation size
1625ead04089SRolf Eike Beer *
1626ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be
1627ead04089SRolf Eike Beer * mapped to userspace without leaking data.
162883342314SNick Piggin */ 162983342314SNick Piggin void *vmalloc_32_user(unsigned long size) 163083342314SNick Piggin { 163183342314SNick Piggin struct vm_struct *area; 163283342314SNick Piggin void *ret; 163383342314SNick Piggin 163484877848SGlauber Costa ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 163584877848SGlauber Costa -1, __builtin_return_address(0)); 16362b4ac44eSEric Dumazet if (ret) { 1637db64fe02SNick Piggin area = find_vm_area(ret); 163883342314SNick Piggin area->flags |= VM_USERMAP; 16392b4ac44eSEric Dumazet } 164083342314SNick Piggin return ret; 164183342314SNick Piggin } 164283342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 164383342314SNick Piggin 16441da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count) 16451da177e4SLinus Torvalds { 16461da177e4SLinus Torvalds struct vm_struct *tmp; 16471da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 16481da177e4SLinus Torvalds unsigned long n; 16491da177e4SLinus Torvalds 16501da177e4SLinus Torvalds /* Don't allow overflow */ 16511da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 16521da177e4SLinus Torvalds count = -(unsigned long) addr; 16531da177e4SLinus Torvalds 16541da177e4SLinus Torvalds read_lock(&vmlist_lock); 16551da177e4SLinus Torvalds for (tmp = vmlist; tmp; tmp = tmp->next) { 16561da177e4SLinus Torvalds vaddr = (char *) tmp->addr; 16571da177e4SLinus Torvalds if (addr >= vaddr + tmp->size - PAGE_SIZE) 16581da177e4SLinus Torvalds continue; 16591da177e4SLinus Torvalds while (addr < vaddr) { 16601da177e4SLinus Torvalds if (count == 0) 16611da177e4SLinus Torvalds goto finished; 16621da177e4SLinus Torvalds *buf = '\0'; 16631da177e4SLinus Torvalds buf++; 16641da177e4SLinus Torvalds addr++; 16651da177e4SLinus Torvalds count--; 16661da177e4SLinus Torvalds } 16671da177e4SLinus Torvalds n = vaddr + tmp->size - PAGE_SIZE - addr; 16681da177e4SLinus Torvalds do { 16691da177e4SLinus Torvalds if (count == 0) 16701da177e4SLinus Torvalds goto finished; 16711da177e4SLinus Torvalds *buf = *addr; 16721da177e4SLinus Torvalds buf++; 16731da177e4SLinus Torvalds addr++; 16741da177e4SLinus Torvalds count--; 16751da177e4SLinus Torvalds } while (--n > 0); 16761da177e4SLinus Torvalds } 16771da177e4SLinus Torvalds finished: 16781da177e4SLinus Torvalds read_unlock(&vmlist_lock); 16791da177e4SLinus Torvalds return buf - buf_start; 16801da177e4SLinus Torvalds } 16811da177e4SLinus Torvalds 16821da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count) 16831da177e4SLinus Torvalds { 16841da177e4SLinus Torvalds struct vm_struct *tmp; 16851da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 16861da177e4SLinus Torvalds unsigned long n; 16871da177e4SLinus Torvalds 16881da177e4SLinus Torvalds /* Don't allow overflow */ 16891da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 16901da177e4SLinus Torvalds count = -(unsigned long) addr; 16911da177e4SLinus Torvalds 16921da177e4SLinus Torvalds read_lock(&vmlist_lock); 16931da177e4SLinus Torvalds for (tmp = vmlist; tmp; tmp = tmp->next) { 16941da177e4SLinus Torvalds vaddr = (char *) tmp->addr; 16951da177e4SLinus Torvalds if (addr >= vaddr + tmp->size - PAGE_SIZE) 16961da177e4SLinus Torvalds continue; 16971da177e4SLinus Torvalds while (addr < vaddr) { 16981da177e4SLinus Torvalds if (count == 0) 16991da177e4SLinus Torvalds goto finished; 17001da177e4SLinus Torvalds buf++; 17011da177e4SLinus Torvalds addr++; 17021da177e4SLinus Torvalds count--; 17031da177e4SLinus Torvalds } 17041da177e4SLinus Torvalds n = vaddr + 
tmp->size - PAGE_SIZE - addr; 17051da177e4SLinus Torvalds do { 17061da177e4SLinus Torvalds if (count == 0) 17071da177e4SLinus Torvalds goto finished; 17081da177e4SLinus Torvalds *addr = *buf; 17091da177e4SLinus Torvalds buf++; 17101da177e4SLinus Torvalds addr++; 17111da177e4SLinus Torvalds count--; 17121da177e4SLinus Torvalds } while (--n > 0); 17131da177e4SLinus Torvalds } 17141da177e4SLinus Torvalds finished: 17151da177e4SLinus Torvalds read_unlock(&vmlist_lock); 17161da177e4SLinus Torvalds return buf - buf_start; 17171da177e4SLinus Torvalds } 171883342314SNick Piggin 171983342314SNick Piggin /** 172083342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 172183342314SNick Piggin * @vma: vma to cover (map full range of vma) 172283342314SNick Piggin * @addr: vmalloc memory 172383342314SNick Piggin * @pgoff: number of pages into addr before first page to map 17247682486bSRandy Dunlap * 17257682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 172683342314SNick Piggin * 172783342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 172883342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 172983342314SNick Piggin * that criteria isn't met. 173083342314SNick Piggin * 173172fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 173283342314SNick Piggin */ 173383342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 173483342314SNick Piggin unsigned long pgoff) 173583342314SNick Piggin { 173683342314SNick Piggin struct vm_struct *area; 173783342314SNick Piggin unsigned long uaddr = vma->vm_start; 173883342314SNick Piggin unsigned long usize = vma->vm_end - vma->vm_start; 173983342314SNick Piggin 174083342314SNick Piggin if ((PAGE_SIZE-1) & (unsigned long)addr) 174183342314SNick Piggin return -EINVAL; 174283342314SNick Piggin 1743db64fe02SNick Piggin area = find_vm_area(addr); 174483342314SNick Piggin if (!area) 1745db64fe02SNick Piggin return -EINVAL; 174683342314SNick Piggin 174783342314SNick Piggin if (!(area->flags & VM_USERMAP)) 1748db64fe02SNick Piggin return -EINVAL; 174983342314SNick Piggin 175083342314SNick Piggin if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE) 1751db64fe02SNick Piggin return -EINVAL; 175283342314SNick Piggin 175383342314SNick Piggin addr += pgoff << PAGE_SHIFT; 175483342314SNick Piggin do { 175583342314SNick Piggin struct page *page = vmalloc_to_page(addr); 1756db64fe02SNick Piggin int ret; 1757db64fe02SNick Piggin 175883342314SNick Piggin ret = vm_insert_page(vma, uaddr, page); 175983342314SNick Piggin if (ret) 176083342314SNick Piggin return ret; 176183342314SNick Piggin 176283342314SNick Piggin uaddr += PAGE_SIZE; 176383342314SNick Piggin addr += PAGE_SIZE; 176483342314SNick Piggin usize -= PAGE_SIZE; 176583342314SNick Piggin } while (usize > 0); 176683342314SNick Piggin 176783342314SNick Piggin /* Prevent "things" like memory migration? VM_flags need a cleanup... */ 176883342314SNick Piggin vma->vm_flags |= VM_RESERVED; 176983342314SNick Piggin 1770db64fe02SNick Piggin return 0; 177183342314SNick Piggin } 177283342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 177383342314SNick Piggin 17741eeb66a1SChristoph Hellwig /* 17751eeb66a1SChristoph Hellwig * Implement a stub for vmalloc_sync_all() if the architecture chose not to 17761eeb66a1SChristoph Hellwig * have one. 
17771eeb66a1SChristoph Hellwig */ 17781eeb66a1SChristoph Hellwig void __attribute__((weak)) vmalloc_sync_all(void) 17791eeb66a1SChristoph Hellwig { 17801eeb66a1SChristoph Hellwig } 17815f4352fbSJeremy Fitzhardinge 17825f4352fbSJeremy Fitzhardinge 17832f569afdSMartin Schwidefsky static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 17845f4352fbSJeremy Fitzhardinge { 17855f4352fbSJeremy Fitzhardinge /* apply_to_page_range() does all the hard work. */ 17865f4352fbSJeremy Fitzhardinge return 0; 17875f4352fbSJeremy Fitzhardinge } 17885f4352fbSJeremy Fitzhardinge 17895f4352fbSJeremy Fitzhardinge /** 17905f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 17915f4352fbSJeremy Fitzhardinge * @size: size of the area 17927682486bSRandy Dunlap * 17937682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 17945f4352fbSJeremy Fitzhardinge * 17955f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 17965f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 17975f4352fbSJeremy Fitzhardinge * are created. If the kernel address space is not shared 17985f4352fbSJeremy Fitzhardinge * between processes, it syncs the pagetable across all 17995f4352fbSJeremy Fitzhardinge * processes. 18005f4352fbSJeremy Fitzhardinge */ 18015f4352fbSJeremy Fitzhardinge struct vm_struct *alloc_vm_area(size_t size) 18025f4352fbSJeremy Fitzhardinge { 18035f4352fbSJeremy Fitzhardinge struct vm_struct *area; 18045f4352fbSJeremy Fitzhardinge 180523016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 180623016969SChristoph Lameter __builtin_return_address(0)); 18075f4352fbSJeremy Fitzhardinge if (area == NULL) 18085f4352fbSJeremy Fitzhardinge return NULL; 18095f4352fbSJeremy Fitzhardinge 18105f4352fbSJeremy Fitzhardinge /* 18115f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 18125f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm. 18135f4352fbSJeremy Fitzhardinge */ 18145f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 18155f4352fbSJeremy Fitzhardinge area->size, f, NULL)) { 18165f4352fbSJeremy Fitzhardinge free_vm_area(area); 18175f4352fbSJeremy Fitzhardinge return NULL; 18185f4352fbSJeremy Fitzhardinge } 18195f4352fbSJeremy Fitzhardinge 18205f4352fbSJeremy Fitzhardinge /* Make sure the pagetables are constructed in process kernel 18215f4352fbSJeremy Fitzhardinge mappings */ 18225f4352fbSJeremy Fitzhardinge vmalloc_sync_all(); 18235f4352fbSJeremy Fitzhardinge 18245f4352fbSJeremy Fitzhardinge return area; 18255f4352fbSJeremy Fitzhardinge } 18265f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 18275f4352fbSJeremy Fitzhardinge 18285f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 18295f4352fbSJeremy Fitzhardinge { 18305f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 18315f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 18325f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 18335f4352fbSJeremy Fitzhardinge kfree(area); 18345f4352fbSJeremy Fitzhardinge } 18355f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 1836a10aa579SChristoph Lameter 1837*ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 1838*ca23e405STejun Heo { 1839*ca23e405STejun Heo return n ? 
rb_entry(n, struct vmap_area, rb_node) : NULL;
1840*ca23e405STejun Heo }
1841*ca23e405STejun Heo 
1842*ca23e405STejun Heo /**
1843*ca23e405STejun Heo * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
1844*ca23e405STejun Heo * @end: target address
1845*ca23e405STejun Heo * @pnext: out arg for the next vmap_area
1846*ca23e405STejun Heo * @pprev: out arg for the previous vmap_area
1847*ca23e405STejun Heo *
1848*ca23e405STejun Heo * Returns: %true if either or both of next and prev are found,
1849*ca23e405STejun Heo * %false if no vmap_area exists
1850*ca23e405STejun Heo *
1851*ca23e405STejun Heo * Find the vmap_areas whose end addresses enclose @end, i.e. if not
1852*ca23e405STejun Heo * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
1853*ca23e405STejun Heo */
1854*ca23e405STejun Heo static bool pvm_find_next_prev(unsigned long end,
1855*ca23e405STejun Heo struct vmap_area **pnext,
1856*ca23e405STejun Heo struct vmap_area **pprev)
1857*ca23e405STejun Heo {
1858*ca23e405STejun Heo struct rb_node *n = vmap_area_root.rb_node;
1859*ca23e405STejun Heo struct vmap_area *va = NULL;
1860*ca23e405STejun Heo 
1861*ca23e405STejun Heo while (n) {
1862*ca23e405STejun Heo va = rb_entry(n, struct vmap_area, rb_node);
1863*ca23e405STejun Heo if (end < va->va_end)
1864*ca23e405STejun Heo n = n->rb_left;
1865*ca23e405STejun Heo else if (end > va->va_end)
1866*ca23e405STejun Heo n = n->rb_right;
1867*ca23e405STejun Heo else
1868*ca23e405STejun Heo break;
1869*ca23e405STejun Heo }
1870*ca23e405STejun Heo 
1871*ca23e405STejun Heo if (!va)
1872*ca23e405STejun Heo return false;
1873*ca23e405STejun Heo 
1874*ca23e405STejun Heo if (va->va_end > end) {
1875*ca23e405STejun Heo *pnext = va;
1876*ca23e405STejun Heo *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
1877*ca23e405STejun Heo } else {
1878*ca23e405STejun Heo *pprev = va;
1879*ca23e405STejun Heo *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
1880*ca23e405STejun Heo }
1881*ca23e405STejun Heo return true;
1882*ca23e405STejun Heo }
1883*ca23e405STejun Heo 
1884*ca23e405STejun Heo /**
1885*ca23e405STejun Heo * pvm_determine_end - find the highest aligned address between two vmap_areas
1886*ca23e405STejun Heo * @pnext: in/out arg for the next vmap_area
1887*ca23e405STejun Heo * @pprev: in/out arg for the previous vmap_area
1888*ca23e405STejun Heo * @align: alignment
1889*ca23e405STejun Heo *
1890*ca23e405STejun Heo * Returns: determined end address
1891*ca23e405STejun Heo *
1892*ca23e405STejun Heo * Find the highest aligned address between *@pnext and *@pprev below
1893*ca23e405STejun Heo * VMALLOC_END. *@pnext and *@pprev are adjusted so that the
1894*ca23e405STejun Heo * aligned-down address is between the end addresses of the two vmap_areas.
1895*ca23e405STejun Heo *
1896*ca23e405STejun Heo * Please note that the address returned by this function may fall
1897*ca23e405STejun Heo * inside the *@pnext vmap_area. The caller is responsible for
1898*ca23e405STejun Heo * checking that.
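 *
 * For example, with @align == 0x1000 and (*pnext)->va_start ==
 * 0x12345, the candidate address is 0x12000; if *pprev ends above
 * 0x12000, both pointers are walked further down until 0x12000 lies
 * between the end addresses of the two areas, and 0x12000 is returned.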
1899*ca23e405STejun Heo */
1900*ca23e405STejun Heo static unsigned long pvm_determine_end(struct vmap_area **pnext,
1901*ca23e405STejun Heo struct vmap_area **pprev,
1902*ca23e405STejun Heo unsigned long align)
1903*ca23e405STejun Heo {
1904*ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
1905*ca23e405STejun Heo unsigned long addr;
1906*ca23e405STejun Heo 
1907*ca23e405STejun Heo if (*pnext)
1908*ca23e405STejun Heo addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
1909*ca23e405STejun Heo else
1910*ca23e405STejun Heo addr = vmalloc_end;
1911*ca23e405STejun Heo 
1912*ca23e405STejun Heo while (*pprev && (*pprev)->va_end > addr) {
1913*ca23e405STejun Heo *pnext = *pprev;
1914*ca23e405STejun Heo *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
1915*ca23e405STejun Heo }
1916*ca23e405STejun Heo 
1917*ca23e405STejun Heo return addr;
1918*ca23e405STejun Heo }
1919*ca23e405STejun Heo 
1920*ca23e405STejun Heo /**
1921*ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
1922*ca23e405STejun Heo * @offsets: array containing offset of each area
1923*ca23e405STejun Heo * @sizes: array containing size of each area
1924*ca23e405STejun Heo * @nr_vms: the number of areas to allocate
1925*ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this
1926*ca23e405STejun Heo * @gfp_mask: allocation mask
1927*ca23e405STejun Heo *
1928*ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated
1929*ca23e405STejun Heo * vm_structs on success, %NULL on failure
1930*ca23e405STejun Heo *
1931*ca23e405STejun Heo * The percpu allocator wants to use congruent vm areas so that it can
1932*ca23e405STejun Heo * maintain the offsets among percpu areas. This function allocates
1933*ca23e405STejun Heo * congruent vmalloc areas for it. These areas tend to be scattered
1934*ca23e405STejun Heo * pretty far apart, with the distance between two areas easily going
1935*ca23e405STejun Heo * up to gigabytes. To avoid interacting with regular vmallocs, these
1936*ca23e405STejun Heo * areas are allocated from the top.
1937*ca23e405STejun Heo *
1938*ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It
1939*ca23e405STejun Heo * does everything top-down and scans areas from the end looking for a
1940*ca23e405STejun Heo * matching slot. While scanning, if any of the areas overlaps with an
1941*ca23e405STejun Heo * existing vmap_area, the base address is pulled down to fit the
1942*ca23e405STejun Heo * area. Scanning is repeated until all the areas fit, and then all
1943*ca23e405STejun Heo * necessary data structures are inserted and the result is returned.
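 *
 * For example, @nr_vms == 2, @offsets == { 0, 3 * SZ } and @sizes ==
 * { SZ, SZ } request two areas which, for some base address B, occupy
 * [B, B + SZ) and [B + 3 * SZ, B + 4 * SZ), preserving the relative
 * offset of 3 * SZ between them.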
1944*ca23e405STejun Heo */ 1945*ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 1946*ca23e405STejun Heo const size_t *sizes, int nr_vms, 1947*ca23e405STejun Heo size_t align, gfp_t gfp_mask) 1948*ca23e405STejun Heo { 1949*ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 1950*ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 1951*ca23e405STejun Heo struct vmap_area **vas, *prev, *next; 1952*ca23e405STejun Heo struct vm_struct **vms; 1953*ca23e405STejun Heo int area, area2, last_area, term_area; 1954*ca23e405STejun Heo unsigned long base, start, end, last_end; 1955*ca23e405STejun Heo bool purged = false; 1956*ca23e405STejun Heo 1957*ca23e405STejun Heo gfp_mask &= GFP_RECLAIM_MASK; 1958*ca23e405STejun Heo 1959*ca23e405STejun Heo /* verify parameters and allocate data structures */ 1960*ca23e405STejun Heo BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align)); 1961*ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 1962*ca23e405STejun Heo start = offsets[area]; 1963*ca23e405STejun Heo end = start + sizes[area]; 1964*ca23e405STejun Heo 1965*ca23e405STejun Heo /* is everything aligned properly? */ 1966*ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 1967*ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 1968*ca23e405STejun Heo 1969*ca23e405STejun Heo /* detect the area with the highest address */ 1970*ca23e405STejun Heo if (start > offsets[last_area]) 1971*ca23e405STejun Heo last_area = area; 1972*ca23e405STejun Heo 1973*ca23e405STejun Heo for (area2 = 0; area2 < nr_vms; area2++) { 1974*ca23e405STejun Heo unsigned long start2 = offsets[area2]; 1975*ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 1976*ca23e405STejun Heo 1977*ca23e405STejun Heo if (area2 == area) 1978*ca23e405STejun Heo continue; 1979*ca23e405STejun Heo 1980*ca23e405STejun Heo BUG_ON(start2 >= start && start2 < end); 1981*ca23e405STejun Heo BUG_ON(end2 <= end && end2 > start); 1982*ca23e405STejun Heo } 1983*ca23e405STejun Heo } 1984*ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 1985*ca23e405STejun Heo 1986*ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 1987*ca23e405STejun Heo WARN_ON(true); 1988*ca23e405STejun Heo return NULL; 1989*ca23e405STejun Heo } 1990*ca23e405STejun Heo 1991*ca23e405STejun Heo vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask); 1992*ca23e405STejun Heo vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask); 1993*ca23e405STejun Heo if (!vas || !vms) 1994*ca23e405STejun Heo goto err_free; 1995*ca23e405STejun Heo 1996*ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 1997*ca23e405STejun Heo vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask); 1998*ca23e405STejun Heo vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask); 1999*ca23e405STejun Heo if (!vas[area] || !vms[area]) 2000*ca23e405STejun Heo goto err_free; 2001*ca23e405STejun Heo } 2002*ca23e405STejun Heo retry: 2003*ca23e405STejun Heo spin_lock(&vmap_area_lock); 2004*ca23e405STejun Heo 2005*ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 2006*ca23e405STejun Heo area = term_area = last_area; 2007*ca23e405STejun Heo start = offsets[area]; 2008*ca23e405STejun Heo end = start + sizes[area]; 2009*ca23e405STejun Heo 2010*ca23e405STejun Heo if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { 2011*ca23e405STejun Heo base = vmalloc_end - last_end; 2012*ca23e405STejun Heo goto found; 2013*ca23e405STejun Heo } 
2014*ca23e405STejun Heo base = pvm_determine_end(&next, &prev, align) - end; 2015*ca23e405STejun Heo 2016*ca23e405STejun Heo while (true) { 2017*ca23e405STejun Heo BUG_ON(next && next->va_end <= base + end); 2018*ca23e405STejun Heo BUG_ON(prev && prev->va_end > base + end); 2019*ca23e405STejun Heo 2020*ca23e405STejun Heo /* 2021*ca23e405STejun Heo * base might have underflowed, add last_end before 2022*ca23e405STejun Heo * comparing. 2023*ca23e405STejun Heo */ 2024*ca23e405STejun Heo if (base + last_end < vmalloc_start + last_end) { 2025*ca23e405STejun Heo spin_unlock(&vmap_area_lock); 2026*ca23e405STejun Heo if (!purged) { 2027*ca23e405STejun Heo purge_vmap_area_lazy(); 2028*ca23e405STejun Heo purged = true; 2029*ca23e405STejun Heo goto retry; 2030*ca23e405STejun Heo } 2031*ca23e405STejun Heo goto err_free; 2032*ca23e405STejun Heo } 2033*ca23e405STejun Heo 2034*ca23e405STejun Heo /* 2035*ca23e405STejun Heo * If next overlaps, move base downwards so that it's 2036*ca23e405STejun Heo * right below next and then recheck. 2037*ca23e405STejun Heo */ 2038*ca23e405STejun Heo if (next && next->va_start < base + end) { 2039*ca23e405STejun Heo base = pvm_determine_end(&next, &prev, align) - end; 2040*ca23e405STejun Heo term_area = area; 2041*ca23e405STejun Heo continue; 2042*ca23e405STejun Heo } 2043*ca23e405STejun Heo 2044*ca23e405STejun Heo /* 2045*ca23e405STejun Heo * If prev overlaps, shift down next and prev and move 2046*ca23e405STejun Heo * base so that it's right below new next and then 2047*ca23e405STejun Heo * recheck. 2048*ca23e405STejun Heo */ 2049*ca23e405STejun Heo if (prev && prev->va_end > base + start) { 2050*ca23e405STejun Heo next = prev; 2051*ca23e405STejun Heo prev = node_to_va(rb_prev(&next->rb_node)); 2052*ca23e405STejun Heo base = pvm_determine_end(&next, &prev, align) - end; 2053*ca23e405STejun Heo term_area = area; 2054*ca23e405STejun Heo continue; 2055*ca23e405STejun Heo } 2056*ca23e405STejun Heo 2057*ca23e405STejun Heo /* 2058*ca23e405STejun Heo * This area fits, move on to the previous one. If 2059*ca23e405STejun Heo * the previous one is the terminal one, we're done. 
2060*ca23e405STejun Heo */ 2061*ca23e405STejun Heo area = (area + nr_vms - 1) % nr_vms; 2062*ca23e405STejun Heo if (area == term_area) 2063*ca23e405STejun Heo break; 2064*ca23e405STejun Heo start = offsets[area]; 2065*ca23e405STejun Heo end = start + sizes[area]; 2066*ca23e405STejun Heo pvm_find_next_prev(base + end, &next, &prev); 2067*ca23e405STejun Heo } 2068*ca23e405STejun Heo found: 2069*ca23e405STejun Heo /* we've found a fitting base, insert all va's */ 2070*ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 2071*ca23e405STejun Heo struct vmap_area *va = vas[area]; 2072*ca23e405STejun Heo 2073*ca23e405STejun Heo va->va_start = base + offsets[area]; 2074*ca23e405STejun Heo va->va_end = va->va_start + sizes[area]; 2075*ca23e405STejun Heo __insert_vmap_area(va); 2076*ca23e405STejun Heo } 2077*ca23e405STejun Heo 2078*ca23e405STejun Heo vmap_area_pcpu_hole = base + offsets[last_area]; 2079*ca23e405STejun Heo 2080*ca23e405STejun Heo spin_unlock(&vmap_area_lock); 2081*ca23e405STejun Heo 2082*ca23e405STejun Heo /* insert all vm's */ 2083*ca23e405STejun Heo for (area = 0; area < nr_vms; area++) 2084*ca23e405STejun Heo insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC, 2085*ca23e405STejun Heo pcpu_get_vm_areas); 2086*ca23e405STejun Heo 2087*ca23e405STejun Heo kfree(vas); 2088*ca23e405STejun Heo return vms; 2089*ca23e405STejun Heo 2090*ca23e405STejun Heo err_free: 2091*ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 2092*ca23e405STejun Heo if (vas) 2093*ca23e405STejun Heo kfree(vas[area]); 2094*ca23e405STejun Heo if (vms) 2095*ca23e405STejun Heo kfree(vms[area]); 2096*ca23e405STejun Heo } 2097*ca23e405STejun Heo kfree(vas); 2098*ca23e405STejun Heo kfree(vms); 2099*ca23e405STejun Heo return NULL; 2100*ca23e405STejun Heo } 2101*ca23e405STejun Heo 2102*ca23e405STejun Heo /** 2103*ca23e405STejun Heo * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 2104*ca23e405STejun Heo * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 2105*ca23e405STejun Heo * @nr_vms: the number of allocated areas 2106*ca23e405STejun Heo * 2107*ca23e405STejun Heo * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
2108*ca23e405STejun Heo */ 2109*ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 2110*ca23e405STejun Heo { 2111*ca23e405STejun Heo int i; 2112*ca23e405STejun Heo 2113*ca23e405STejun Heo for (i = 0; i < nr_vms; i++) 2114*ca23e405STejun Heo free_vm_area(vms[i]); 2115*ca23e405STejun Heo kfree(vms); 2116*ca23e405STejun Heo } 2117a10aa579SChristoph Lameter 2118a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS 2119a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos) 2120a10aa579SChristoph Lameter { 2121a10aa579SChristoph Lameter loff_t n = *pos; 2122a10aa579SChristoph Lameter struct vm_struct *v; 2123a10aa579SChristoph Lameter 2124a10aa579SChristoph Lameter read_lock(&vmlist_lock); 2125a10aa579SChristoph Lameter v = vmlist; 2126a10aa579SChristoph Lameter while (n > 0 && v) { 2127a10aa579SChristoph Lameter n--; 2128a10aa579SChristoph Lameter v = v->next; 2129a10aa579SChristoph Lameter } 2130a10aa579SChristoph Lameter if (!n) 2131a10aa579SChristoph Lameter return v; 2132a10aa579SChristoph Lameter 2133a10aa579SChristoph Lameter return NULL; 2134a10aa579SChristoph Lameter 2135a10aa579SChristoph Lameter } 2136a10aa579SChristoph Lameter 2137a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos) 2138a10aa579SChristoph Lameter { 2139a10aa579SChristoph Lameter struct vm_struct *v = p; 2140a10aa579SChristoph Lameter 2141a10aa579SChristoph Lameter ++*pos; 2142a10aa579SChristoph Lameter return v->next; 2143a10aa579SChristoph Lameter } 2144a10aa579SChristoph Lameter 2145a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p) 2146a10aa579SChristoph Lameter { 2147a10aa579SChristoph Lameter read_unlock(&vmlist_lock); 2148a10aa579SChristoph Lameter } 2149a10aa579SChristoph Lameter 2150a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v) 2151a47a126aSEric Dumazet { 2152a47a126aSEric Dumazet if (NUMA_BUILD) { 2153a47a126aSEric Dumazet unsigned int nr, *counters = m->private; 2154a47a126aSEric Dumazet 2155a47a126aSEric Dumazet if (!counters) 2156a47a126aSEric Dumazet return; 2157a47a126aSEric Dumazet 2158a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 2159a47a126aSEric Dumazet 2160a47a126aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr++) 2161a47a126aSEric Dumazet counters[page_to_nid(v->pages[nr])]++; 2162a47a126aSEric Dumazet 2163a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY) 2164a47a126aSEric Dumazet if (counters[nr]) 2165a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]); 2166a47a126aSEric Dumazet } 2167a47a126aSEric Dumazet } 2168a47a126aSEric Dumazet 2169a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p) 2170a10aa579SChristoph Lameter { 2171a10aa579SChristoph Lameter struct vm_struct *v = p; 2172a10aa579SChristoph Lameter 2173a10aa579SChristoph Lameter seq_printf(m, "0x%p-0x%p %7ld", 2174a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 2175a10aa579SChristoph Lameter 217623016969SChristoph Lameter if (v->caller) { 21779c246247SHugh Dickins char buff[KSYM_SYMBOL_LEN]; 217823016969SChristoph Lameter 217923016969SChristoph Lameter seq_putc(m, ' '); 218023016969SChristoph Lameter sprint_symbol(buff, (unsigned long)v->caller); 218123016969SChristoph Lameter seq_puts(m, buff); 218223016969SChristoph Lameter } 218323016969SChristoph Lameter 2184a10aa579SChristoph Lameter if (v->nr_pages) 2185a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 
2186a10aa579SChristoph Lameter 2187a10aa579SChristoph Lameter if (v->phys_addr) 2188a10aa579SChristoph Lameter seq_printf(m, " phys=%lx", v->phys_addr); 2189a10aa579SChristoph Lameter 2190a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 2191a10aa579SChristoph Lameter seq_printf(m, " ioremap"); 2192a10aa579SChristoph Lameter 2193a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 2194a10aa579SChristoph Lameter seq_printf(m, " vmalloc"); 2195a10aa579SChristoph Lameter 2196a10aa579SChristoph Lameter if (v->flags & VM_MAP) 2197a10aa579SChristoph Lameter seq_printf(m, " vmap"); 2198a10aa579SChristoph Lameter 2199a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 2200a10aa579SChristoph Lameter seq_printf(m, " user"); 2201a10aa579SChristoph Lameter 2202a10aa579SChristoph Lameter if (v->flags & VM_VPAGES) 2203a10aa579SChristoph Lameter seq_printf(m, " vpages"); 2204a10aa579SChristoph Lameter 2205a47a126aSEric Dumazet show_numa_info(m, v); 2206a10aa579SChristoph Lameter seq_putc(m, '\n'); 2207a10aa579SChristoph Lameter return 0; 2208a10aa579SChristoph Lameter } 2209a10aa579SChristoph Lameter 22105f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = { 2211a10aa579SChristoph Lameter .start = s_start, 2212a10aa579SChristoph Lameter .next = s_next, 2213a10aa579SChristoph Lameter .stop = s_stop, 2214a10aa579SChristoph Lameter .show = s_show, 2215a10aa579SChristoph Lameter }; 22165f6a6a9cSAlexey Dobriyan 22175f6a6a9cSAlexey Dobriyan static int vmalloc_open(struct inode *inode, struct file *file) 22185f6a6a9cSAlexey Dobriyan { 22195f6a6a9cSAlexey Dobriyan unsigned int *ptr = NULL; 22205f6a6a9cSAlexey Dobriyan int ret; 22215f6a6a9cSAlexey Dobriyan 22225f6a6a9cSAlexey Dobriyan if (NUMA_BUILD) 22235f6a6a9cSAlexey Dobriyan ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); 22245f6a6a9cSAlexey Dobriyan ret = seq_open(file, &vmalloc_op); 22255f6a6a9cSAlexey Dobriyan if (!ret) { 22265f6a6a9cSAlexey Dobriyan struct seq_file *m = file->private_data; 22275f6a6a9cSAlexey Dobriyan m->private = ptr; 22285f6a6a9cSAlexey Dobriyan } else 22295f6a6a9cSAlexey Dobriyan kfree(ptr); 22305f6a6a9cSAlexey Dobriyan return ret; 22315f6a6a9cSAlexey Dobriyan } 22325f6a6a9cSAlexey Dobriyan 22335f6a6a9cSAlexey Dobriyan static const struct file_operations proc_vmalloc_operations = { 22345f6a6a9cSAlexey Dobriyan .open = vmalloc_open, 22355f6a6a9cSAlexey Dobriyan .read = seq_read, 22365f6a6a9cSAlexey Dobriyan .llseek = seq_lseek, 22375f6a6a9cSAlexey Dobriyan .release = seq_release_private, 22385f6a6a9cSAlexey Dobriyan }; 22395f6a6a9cSAlexey Dobriyan 22405f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 22415f6a6a9cSAlexey Dobriyan { 22425f6a6a9cSAlexey Dobriyan proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); 22435f6a6a9cSAlexey Dobriyan return 0; 22445f6a6a9cSAlexey Dobriyan } 22455f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 2246a10aa579SChristoph Lameter #endif 2247a10aa579SChristoph Lameter 2248