/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/bootmem.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>


/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long start, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(start, end);

	if (unlikely(err))
		return err;
	return nr;
}

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
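/*
 * Illustrative sketch (not from the original file): vmap_page_range()
 * returns the number of pages it mapped, which map_vm_area() below uses
 * to advance its caller's page cursor. A hypothetical caller mapping
 * "count" pages at "addr" would do:
 *
 *	int nr = vmap_page_range(addr, addr + (count << PAGE_SHIFT),
 *				 PAGE_KERNEL, pages);
 *	if (nr < 0)
 *		return nr;
 *
 * A negative value is -EBUSY (a pte was already populated) or -ENOMEM
 * bubbled up from the pte level; on success nr == count.
 */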
/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
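/*
 * Illustrative sketch (an assumption, not from the original file): a
 * driver handing a vmalloc()ed buffer to hardware page by page might use
 * vmalloc_to_pfn() like this:
 *
 *	void *buf = vmalloc(8 * PAGE_SIZE);
 *	unsigned long pfn;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	pfn = vmalloc_to_pfn(buf + 3 * PAGE_SIZE);
 *
 * Each page-aligned offset can map to a discontiguous physical frame, so
 * the lookup must be repeated per page.
 */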

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}
static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}
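/*
 * Illustrative sketch (not from the original file): vm_map_ram() below
 * grabs raw kva for a large request in exactly this way:
 *
 *	struct vmap_area *va;
 *
 *	va = alloc_vmap_area(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END,
 *			     node, GFP_KERNEL);
 *	if (IS_ERR(va))
 *		return NULL;
 *	addr = va->va_start;
 *
 * The returned region is [va->va_start, va->va_end) and has already been
 * inserted into the rbtree and the address-sorted list.
 */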
static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}
/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
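/*
 * Worked example (illustrative, assuming 4kB pages): with 4 online CPUs,
 * fls(4) == 3, so lazy_max_pages() returns 3 * 8192 == 24576 pages, i.e.
 * up to 96MB of lazily freed kva accumulates before a purge is attempted;
 * thanks to the log scale, 64 CPUs (fls == 7) only raises that to 224MB.
 */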
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_MUTEX(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!mutex_trylock(&purge_lock))
			return;
	} else
		mutex_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry(va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	mutex_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}
/*
 * Free and unmap a vmap area; the caller must ensure flush_cache_vunmap()
 * has already been called for the range.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
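/*
 * Worked example (illustrative, assuming 4kB pages): on 32-bit with the
 * guessed 128MB VMALLOC_SPACE and NR_CPUS == 4, VMALLOC_PAGES is 32768,
 * so VMALLOC_PAGES / NR_CPUS / 16 == 512, which already lies between
 * VMAP_BBMAP_BITS_MIN (64) and VMAP_BBMAP_BITS_MAX (1024). Each vmap
 * block then covers VMAP_BLOCK_SIZE == 512 * 4kB == 2MB of kva.
 */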
static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct {
			struct list_head free_list;
			struct list_head dirty_list;
		};
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
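/*
 * Illustrative sketch (not from the original file): because blocks are
 * allocated with align == VMAP_BLOCK_SIZE, every address inside a block
 * yields the same index, so any allocation can serve as a lookup key:
 *
 *	BUG_ON(addr_to_vb_idx(vb->va->va_start + VMAP_BLOCK_SIZE - 1) !=
 *	       addr_to_vb_idx(vb->va->va_start));
 *
 * vb_alloc() below asserts exactly this invariant for the addresses it
 * hands out.
 */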
static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);
	INIT_LIST_HEAD(&vb->dirty_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	spin_lock(&vb->vbq->lock);
	if (!list_empty(&vb->free_list))
		list_del(&vb->free_list);
	if (!list_empty(&vb->dirty_list))
		list_del(&vb->dirty_list);
	spin_unlock(&vb->vbq->lock);

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}
static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
	if (!vb->dirty) {
		spin_lock(&vb->vbq->lock);
		list_add(&vb->dirty_list, &vb->vbq->dirty);
		spin_unlock(&vb->vbq->lock);
	}
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
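/*
 * Illustrative sketch (an assumption, not from the original file): code
 * about to change the attributes of pages in the linear mapping, e.g. an
 * arch's set_memory_ro()-style path, might flush stale aliases first:
 *
 *	vm_unmap_aliases();
 *	(then rewrite the direct-map ptes and flush the TLB)
 *
 * Afterwards no CPU can still hold a TLB entry for a lazily freed vmap
 * alias of those pages.
 */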
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
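/*
 * Illustrative sketch (not from the original file): the intended pairing
 * for a short-lived mapping of a small page array; "pages", "count",
 * "data" and "len" are hypothetical:
 *
 *	void *p = vm_map_ram(pages, count, -1, PAGE_KERNEL);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	memcpy(p, data, len);
 *	vm_unmap_ram(p, count);
 *
 * The same count must be passed back to vm_unmap_ram(); partial unmaps
 * of a vm_map_ram() region are not supported.
 */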
void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = alloc_bootmem(sizeof(struct vmap_area));
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}
	vmap_initialized = true;
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
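/*
 * Illustrative sketch (not from the original file): on success
 * map_vm_area() advances the caller's cursor past the consumed pages,
 * so consecutive areas can be mapped out of one big array:
 *
 *	struct page **cursor = pages;
 *	int err;
 *
 *	err = map_vm_area(area, PAGE_KERNEL, &cursor);
 *	if (err)
 *		return err;
 *	(cursor now points at the first page not yet mapped)
 */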
/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}
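/*
 * Illustrative sketch (an assumption, not from the original file): the
 * classic ioremap-style pattern reserves kva here and fills in the ptes
 * separately, in arch code:
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *
 *	if (!area)
 *		return NULL;
 *	(map the physical range at area->addr, minus the guard page)
 *
 * remove_vm_area() below hands the region back when the mapping dies.
 */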
/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}
12331da177e4SLinus Torvalds */ 1234b3bdda02SChristoph Lameter void vfree(const void *addr) 12351da177e4SLinus Torvalds { 12361da177e4SLinus Torvalds BUG_ON(in_interrupt()); 12371da177e4SLinus Torvalds __vunmap(addr, 1); 12381da177e4SLinus Torvalds } 12391da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 12401da177e4SLinus Torvalds 12411da177e4SLinus Torvalds /** 12421da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 12431da177e4SLinus Torvalds * @addr: memory base address 12441da177e4SLinus Torvalds * 12451da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 12461da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 12471da177e4SLinus Torvalds * 124880e93effSPekka Enberg * Must not be called in interrupt context. 12491da177e4SLinus Torvalds */ 1250b3bdda02SChristoph Lameter void vunmap(const void *addr) 12511da177e4SLinus Torvalds { 12521da177e4SLinus Torvalds BUG_ON(in_interrupt()); 12531da177e4SLinus Torvalds __vunmap(addr, 0); 12541da177e4SLinus Torvalds } 12551da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 12561da177e4SLinus Torvalds 12571da177e4SLinus Torvalds /** 12581da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 12591da177e4SLinus Torvalds * @pages: array of page pointers 12601da177e4SLinus Torvalds * @count: number of pages to map 12611da177e4SLinus Torvalds * @flags: vm_area->flags 12621da177e4SLinus Torvalds * @prot: page protection for the mapping 12631da177e4SLinus Torvalds * 12641da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 12651da177e4SLinus Torvalds * space. 12661da177e4SLinus Torvalds */ 12671da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 12681da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 12691da177e4SLinus Torvalds { 12701da177e4SLinus Torvalds struct vm_struct *area; 12711da177e4SLinus Torvalds 12721da177e4SLinus Torvalds if (count > num_physpages) 12731da177e4SLinus Torvalds return NULL; 12741da177e4SLinus Torvalds 127523016969SChristoph Lameter area = get_vm_area_caller((count << PAGE_SHIFT), flags, 127623016969SChristoph Lameter __builtin_return_address(0)); 12771da177e4SLinus Torvalds if (!area) 12781da177e4SLinus Torvalds return NULL; 127923016969SChristoph Lameter 12801da177e4SLinus Torvalds if (map_vm_area(area, prot, &pages)) { 12811da177e4SLinus Torvalds vunmap(area->addr); 12821da177e4SLinus Torvalds return NULL; 12831da177e4SLinus Torvalds } 12841da177e4SLinus Torvalds 12851da177e4SLinus Torvalds return area->addr; 12861da177e4SLinus Torvalds } 12871da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 12881da177e4SLinus Torvalds 1289db64fe02SNick Piggin static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, 1290db64fe02SNick Piggin int node, void *caller); 1291e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 129223016969SChristoph Lameter pgprot_t prot, int node, void *caller) 12931da177e4SLinus Torvalds { 12941da177e4SLinus Torvalds struct page **pages; 12951da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 12961da177e4SLinus Torvalds 12971da177e4SLinus Torvalds nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT; 12981da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 12991da177e4SLinus Torvalds 13001da177e4SLinus Torvalds area->nr_pages = nr_pages; 13011da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
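	   Allocating the pages[] array for a very large area may itself
	   recurse into __vmalloc_node() (the array_size > PAGE_SIZE case
	   below), but each nested request is smaller than the previous one
	   by a factor of PAGE_SIZE / sizeof(struct page *), so only a
	   handful of levels are possible.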
*/ 13028757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 130394f6030cSChristoph Lameter pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO, 130423016969SChristoph Lameter PAGE_KERNEL, node, caller); 13058757d5faSJan Kiszka area->flags |= VM_VPAGES; 1306286e1ea3SAndrew Morton } else { 1307286e1ea3SAndrew Morton pages = kmalloc_node(array_size, 13086cb06229SChristoph Lameter (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO, 1309286e1ea3SAndrew Morton node); 1310286e1ea3SAndrew Morton } 13111da177e4SLinus Torvalds area->pages = pages; 131223016969SChristoph Lameter area->caller = caller; 13131da177e4SLinus Torvalds if (!area->pages) { 13141da177e4SLinus Torvalds remove_vm_area(area->addr); 13151da177e4SLinus Torvalds kfree(area); 13161da177e4SLinus Torvalds return NULL; 13171da177e4SLinus Torvalds } 13181da177e4SLinus Torvalds 13191da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1320bf53d6f8SChristoph Lameter struct page *page; 1321bf53d6f8SChristoph Lameter 1322930fc45aSChristoph Lameter if (node < 0) 1323bf53d6f8SChristoph Lameter page = alloc_page(gfp_mask); 1324930fc45aSChristoph Lameter else 1325bf53d6f8SChristoph Lameter page = alloc_pages_node(node, gfp_mask, 0); 1326bf53d6f8SChristoph Lameter 1327bf53d6f8SChristoph Lameter if (unlikely(!page)) { 13281da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 13291da177e4SLinus Torvalds area->nr_pages = i; 13301da177e4SLinus Torvalds goto fail; 13311da177e4SLinus Torvalds } 1332bf53d6f8SChristoph Lameter area->pages[i] = page; 13331da177e4SLinus Torvalds } 13341da177e4SLinus Torvalds 13351da177e4SLinus Torvalds if (map_vm_area(area, prot, &pages)) 13361da177e4SLinus Torvalds goto fail; 13371da177e4SLinus Torvalds return area->addr; 13381da177e4SLinus Torvalds 13391da177e4SLinus Torvalds fail: 13401da177e4SLinus Torvalds vfree(area->addr); 13411da177e4SLinus Torvalds return NULL; 13421da177e4SLinus Torvalds } 13431da177e4SLinus Torvalds 1344930fc45aSChristoph Lameter void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) 1345930fc45aSChristoph Lameter { 134623016969SChristoph Lameter return __vmalloc_area_node(area, gfp_mask, prot, -1, 134723016969SChristoph Lameter __builtin_return_address(0)); 1348930fc45aSChristoph Lameter } 1349930fc45aSChristoph Lameter 13501da177e4SLinus Torvalds /** 1351930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 13521da177e4SLinus Torvalds * @size: allocation size 13531da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 13541da177e4SLinus Torvalds * @prot: protection mask for the allocated pages 1355d44e0780SRandy Dunlap * @node: node to use for allocation or -1 1356c85d194bSRandy Dunlap * @caller: caller's return address 13571da177e4SLinus Torvalds * 13581da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 13591da177e4SLinus Torvalds * allocator with @gfp_mask flags. Map them into contiguous 13601da177e4SLinus Torvalds * kernel virtual space, using a pagetable protection of @prot. 
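 *
 * For reference, the exported wrappers below are thin shims around
 * this function; e.g. vmalloc() is simply
 *
 *	__vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
 *			-1, __builtin_return_address(0));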
13611da177e4SLinus Torvalds */ 1362b221385bSAdrian Bunk static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, 136323016969SChristoph Lameter int node, void *caller) 13641da177e4SLinus Torvalds { 13651da177e4SLinus Torvalds struct vm_struct *area; 13661da177e4SLinus Torvalds 13671da177e4SLinus Torvalds size = PAGE_ALIGN(size); 13681da177e4SLinus Torvalds if (!size || (size >> PAGE_SHIFT) > num_physpages) 13691da177e4SLinus Torvalds return NULL; 13701da177e4SLinus Torvalds 137123016969SChristoph Lameter area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END, 137223016969SChristoph Lameter node, gfp_mask, caller); 137323016969SChristoph Lameter 13741da177e4SLinus Torvalds if (!area) 13751da177e4SLinus Torvalds return NULL; 13761da177e4SLinus Torvalds 137723016969SChristoph Lameter return __vmalloc_area_node(area, gfp_mask, prot, node, caller); 13781da177e4SLinus Torvalds } 13791da177e4SLinus Torvalds 1380930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 1381930fc45aSChristoph Lameter { 138223016969SChristoph Lameter return __vmalloc_node(size, gfp_mask, prot, -1, 138323016969SChristoph Lameter __builtin_return_address(0)); 1384930fc45aSChristoph Lameter } 13851da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 13861da177e4SLinus Torvalds 13871da177e4SLinus Torvalds /** 13881da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 13891da177e4SLinus Torvalds * @size: allocation size 13901da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 13911da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 13921da177e4SLinus Torvalds * 1393c1c8897fSMichael Opdenacker * For tight control over the page level allocator and protection flags 13941da177e4SLinus Torvalds * use __vmalloc() instead. 13951da177e4SLinus Torvalds */ 13961da177e4SLinus Torvalds void *vmalloc(unsigned long size) 13971da177e4SLinus Torvalds { 139823016969SChristoph Lameter return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 139923016969SChristoph Lameter -1, __builtin_return_address(0)); 14001da177e4SLinus Torvalds } 14011da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 14021da177e4SLinus Torvalds 1403930fc45aSChristoph Lameter /** 1404ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 140583342314SNick Piggin * @size: allocation size 1406ead04089SRolf Eike Beer * 1407ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 1408ead04089SRolf Eike Beer * without leaking data.
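 *
 * A minimal sketch of the intended pairing with remap_vmalloc_range()
 * (illustrative only; 'priv', 'buf_size' and the mmap handler are
 * hypothetical):
 *
 *	priv->buf = vmalloc_user(buf_size);
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, priv->buf, 0);
 *	}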
140983342314SNick Piggin */ 141083342314SNick Piggin void *vmalloc_user(unsigned long size) 141183342314SNick Piggin { 141283342314SNick Piggin struct vm_struct *area; 141383342314SNick Piggin void *ret; 141483342314SNick Piggin 141584877848SGlauber Costa ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 141684877848SGlauber Costa PAGE_KERNEL, -1, __builtin_return_address(0)); 14172b4ac44eSEric Dumazet if (ret) { 1418db64fe02SNick Piggin area = find_vm_area(ret); 141983342314SNick Piggin area->flags |= VM_USERMAP; 14202b4ac44eSEric Dumazet } 142183342314SNick Piggin return ret; 142283342314SNick Piggin } 142383342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 142483342314SNick Piggin 142583342314SNick Piggin /** 1426930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 1427930fc45aSChristoph Lameter * @size: allocation size 1428d44e0780SRandy Dunlap * @node: numa node 1429930fc45aSChristoph Lameter * 1430930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 1431930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 1432930fc45aSChristoph Lameter * 1433c1c8897fSMichael Opdenacker * For tight control over the page level allocator and protection flags 1434930fc45aSChristoph Lameter * use __vmalloc() instead. 1435930fc45aSChristoph Lameter */ 1436930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 1437930fc45aSChristoph Lameter { 143823016969SChristoph Lameter return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 143923016969SChristoph Lameter node, __builtin_return_address(0)); 1440930fc45aSChristoph Lameter } 1441930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 1442930fc45aSChristoph Lameter 14434dc3b16bSPavel Pisa #ifndef PAGE_KERNEL_EXEC 14444dc3b16bSPavel Pisa # define PAGE_KERNEL_EXEC PAGE_KERNEL 14454dc3b16bSPavel Pisa #endif 14464dc3b16bSPavel Pisa 14471da177e4SLinus Torvalds /** 14481da177e4SLinus Torvalds * vmalloc_exec - allocate virtually contiguous, executable memory 14491da177e4SLinus Torvalds * @size: allocation size 14501da177e4SLinus Torvalds * 14511da177e4SLinus Torvalds * Kernel-internal function to allocate enough pages to cover @size 14521da177e4SLinus Torvalds * from the page level allocator and map them into contiguous and 14531da177e4SLinus Torvalds * executable kernel virtual space. 14541da177e4SLinus Torvalds * 1455c1c8897fSMichael Opdenacker * For tight control over the page level allocator and protection flags 14561da177e4SLinus Torvalds * use __vmalloc() instead.
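 *
 * An illustrative sketch only ('image' and 'text_size' are
 * hypothetical); a loader-style caller copies code in before
 * executing it:
 *
 *	text = vmalloc_exec(text_size);
 *	if (text)
 *		memcpy(text, image, text_size);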
14571da177e4SLinus Torvalds */ 14581da177e4SLinus Torvalds 14591da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size) 14601da177e4SLinus Torvalds { 146184877848SGlauber Costa return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, 146284877848SGlauber Costa -1, __builtin_return_address(0)); 14631da177e4SLinus Torvalds } 14641da177e4SLinus Torvalds 14650d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 14667ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 14670d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 14687ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 14690d08e0d3SAndi Kleen #else 14700d08e0d3SAndi Kleen #define GFP_VMALLOC32 GFP_KERNEL 14710d08e0d3SAndi Kleen #endif 14720d08e0d3SAndi Kleen 14731da177e4SLinus Torvalds /** 14741da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 14751da177e4SLinus Torvalds * @size: allocation size 14761da177e4SLinus Torvalds * 14771da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 14781da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 14791da177e4SLinus Torvalds */ 14801da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 14811da177e4SLinus Torvalds { 148284877848SGlauber Costa return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL, 148384877848SGlauber Costa -1, __builtin_return_address(0)); 14841da177e4SLinus Torvalds } 14851da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 14861da177e4SLinus Torvalds 148783342314SNick Piggin /** 1488ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 148983342314SNick Piggin * @size: allocation size 1490ead04089SRolf Eike Beer * 1491ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 1492ead04089SRolf Eike Beer * mapped to userspace without leaking data.
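 *
 * Usage mirrors vmalloc_user() above: the area is flagged VM_USERMAP,
 * so it can be handed to remap_vmalloc_range() from a driver's mmap
 * handler when the backing pages must additionally be 32bit
 * addressable.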
149383342314SNick Piggin */ 149483342314SNick Piggin void *vmalloc_32_user(unsigned long size) 149583342314SNick Piggin { 149683342314SNick Piggin struct vm_struct *area; 149783342314SNick Piggin void *ret; 149883342314SNick Piggin 149984877848SGlauber Costa ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 150084877848SGlauber Costa -1, __builtin_return_address(0)); 15012b4ac44eSEric Dumazet if (ret) { 1502db64fe02SNick Piggin area = find_vm_area(ret); 150383342314SNick Piggin area->flags |= VM_USERMAP; 15042b4ac44eSEric Dumazet } 150583342314SNick Piggin return ret; 150683342314SNick Piggin } 150783342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 150883342314SNick Piggin 15091da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count) 15101da177e4SLinus Torvalds { 15111da177e4SLinus Torvalds struct vm_struct *tmp; 15121da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 15131da177e4SLinus Torvalds unsigned long n; 15141da177e4SLinus Torvalds 15151da177e4SLinus Torvalds /* Don't allow overflow */ 15161da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 15171da177e4SLinus Torvalds count = -(unsigned long) addr; 15181da177e4SLinus Torvalds 15191da177e4SLinus Torvalds read_lock(&vmlist_lock); 15201da177e4SLinus Torvalds for (tmp = vmlist; tmp; tmp = tmp->next) { 15211da177e4SLinus Torvalds vaddr = (char *) tmp->addr; 15221da177e4SLinus Torvalds if (addr >= vaddr + tmp->size - PAGE_SIZE) 15231da177e4SLinus Torvalds continue; 15241da177e4SLinus Torvalds while (addr < vaddr) { 15251da177e4SLinus Torvalds if (count == 0) 15261da177e4SLinus Torvalds goto finished; 15271da177e4SLinus Torvalds *buf = '\0'; 15281da177e4SLinus Torvalds buf++; 15291da177e4SLinus Torvalds addr++; 15301da177e4SLinus Torvalds count--; 15311da177e4SLinus Torvalds } 15321da177e4SLinus Torvalds n = vaddr + tmp->size - PAGE_SIZE - addr; 15331da177e4SLinus Torvalds do { 15341da177e4SLinus Torvalds if (count == 0) 15351da177e4SLinus Torvalds goto finished; 15361da177e4SLinus Torvalds *buf = *addr; 15371da177e4SLinus Torvalds buf++; 15381da177e4SLinus Torvalds addr++; 15391da177e4SLinus Torvalds count--; 15401da177e4SLinus Torvalds } while (--n > 0); 15411da177e4SLinus Torvalds } 15421da177e4SLinus Torvalds finished: 15431da177e4SLinus Torvalds read_unlock(&vmlist_lock); 15441da177e4SLinus Torvalds return buf - buf_start; 15451da177e4SLinus Torvalds } 15461da177e4SLinus Torvalds 15471da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count) 15481da177e4SLinus Torvalds { 15491da177e4SLinus Torvalds struct vm_struct *tmp; 15501da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 15511da177e4SLinus Torvalds unsigned long n; 15521da177e4SLinus Torvalds 15531da177e4SLinus Torvalds /* Don't allow overflow */ 15541da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 15551da177e4SLinus Torvalds count = -(unsigned long) addr; 15561da177e4SLinus Torvalds 15571da177e4SLinus Torvalds read_lock(&vmlist_lock); 15581da177e4SLinus Torvalds for (tmp = vmlist; tmp; tmp = tmp->next) { 15591da177e4SLinus Torvalds vaddr = (char *) tmp->addr; 15601da177e4SLinus Torvalds if (addr >= vaddr + tmp->size - PAGE_SIZE) 15611da177e4SLinus Torvalds continue; 15621da177e4SLinus Torvalds while (addr < vaddr) { 15631da177e4SLinus Torvalds if (count == 0) 15641da177e4SLinus Torvalds goto finished; 15651da177e4SLinus Torvalds buf++; 15661da177e4SLinus Torvalds addr++; 15671da177e4SLinus Torvalds count--; 15681da177e4SLinus Torvalds } 15691da177e4SLinus Torvalds n = vaddr + 
tmp->size - PAGE_SIZE - addr; 15701da177e4SLinus Torvalds do { 15711da177e4SLinus Torvalds if (count == 0) 15721da177e4SLinus Torvalds goto finished; 15731da177e4SLinus Torvalds *addr = *buf; 15741da177e4SLinus Torvalds buf++; 15751da177e4SLinus Torvalds addr++; 15761da177e4SLinus Torvalds count--; 15771da177e4SLinus Torvalds } while (--n > 0); 15781da177e4SLinus Torvalds } 15791da177e4SLinus Torvalds finished: 15801da177e4SLinus Torvalds read_unlock(&vmlist_lock); 15811da177e4SLinus Torvalds return buf - buf_start; 15821da177e4SLinus Torvalds } 158383342314SNick Piggin 158483342314SNick Piggin /** 158583342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 158683342314SNick Piggin * @vma: vma to cover (map full range of vma) 158783342314SNick Piggin * @addr: vmalloc memory 158883342314SNick Piggin * @pgoff: number of pages into addr before first page to map 15897682486bSRandy Dunlap * 15907682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 159183342314SNick Piggin * 159283342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 159383342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 159483342314SNick Piggin * that criteria isn't met. 159583342314SNick Piggin * 159672fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 159783342314SNick Piggin */ 159883342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 159983342314SNick Piggin unsigned long pgoff) 160083342314SNick Piggin { 160183342314SNick Piggin struct vm_struct *area; 160283342314SNick Piggin unsigned long uaddr = vma->vm_start; 160383342314SNick Piggin unsigned long usize = vma->vm_end - vma->vm_start; 160483342314SNick Piggin 160583342314SNick Piggin if ((PAGE_SIZE-1) & (unsigned long)addr) 160683342314SNick Piggin return -EINVAL; 160783342314SNick Piggin 1608db64fe02SNick Piggin area = find_vm_area(addr); 160983342314SNick Piggin if (!area) 1610db64fe02SNick Piggin return -EINVAL; 161183342314SNick Piggin 161283342314SNick Piggin if (!(area->flags & VM_USERMAP)) 1613db64fe02SNick Piggin return -EINVAL; 161483342314SNick Piggin 161583342314SNick Piggin if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE) 1616db64fe02SNick Piggin return -EINVAL; 161783342314SNick Piggin 161883342314SNick Piggin addr += pgoff << PAGE_SHIFT; 161983342314SNick Piggin do { 162083342314SNick Piggin struct page *page = vmalloc_to_page(addr); 1621db64fe02SNick Piggin int ret; 1622db64fe02SNick Piggin 162383342314SNick Piggin ret = vm_insert_page(vma, uaddr, page); 162483342314SNick Piggin if (ret) 162583342314SNick Piggin return ret; 162683342314SNick Piggin 162783342314SNick Piggin uaddr += PAGE_SIZE; 162883342314SNick Piggin addr += PAGE_SIZE; 162983342314SNick Piggin usize -= PAGE_SIZE; 163083342314SNick Piggin } while (usize > 0); 163183342314SNick Piggin 163283342314SNick Piggin /* Prevent "things" like memory migration? VM_flags need a cleanup... */ 163383342314SNick Piggin vma->vm_flags |= VM_RESERVED; 163483342314SNick Piggin 1635db64fe02SNick Piggin return 0; 163683342314SNick Piggin } 163783342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 163883342314SNick Piggin 16391eeb66a1SChristoph Hellwig /* 16401eeb66a1SChristoph Hellwig * Implement a stub for vmalloc_sync_all() if the architecture chose not to 16411eeb66a1SChristoph Hellwig * have one. 
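 * Because the stub is declared weak, an architecture overrides it
 * simply by providing its own non-weak vmalloc_sync_all() definition;
 * no Kconfig option or #ifdef is required.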
16421eeb66a1SChristoph Hellwig */ 16431eeb66a1SChristoph Hellwig void __attribute__((weak)) vmalloc_sync_all(void) 16441eeb66a1SChristoph Hellwig { 16451eeb66a1SChristoph Hellwig } 16465f4352fbSJeremy Fitzhardinge 16475f4352fbSJeremy Fitzhardinge 16482f569afdSMartin Schwidefsky static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 16495f4352fbSJeremy Fitzhardinge { 16505f4352fbSJeremy Fitzhardinge /* apply_to_page_range() does all the hard work. */ 16515f4352fbSJeremy Fitzhardinge return 0; 16525f4352fbSJeremy Fitzhardinge } 16535f4352fbSJeremy Fitzhardinge 16545f4352fbSJeremy Fitzhardinge /** 16555f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 16565f4352fbSJeremy Fitzhardinge * @size: size of the area 16577682486bSRandy Dunlap * 16587682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 16595f4352fbSJeremy Fitzhardinge * 16605f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 16615f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 16625f4352fbSJeremy Fitzhardinge * are created. If the kernel address space is not shared 16635f4352fbSJeremy Fitzhardinge * between processes, it syncs the pagetable across all 16645f4352fbSJeremy Fitzhardinge * processes. 16655f4352fbSJeremy Fitzhardinge */ 16665f4352fbSJeremy Fitzhardinge struct vm_struct *alloc_vm_area(size_t size) 16675f4352fbSJeremy Fitzhardinge { 16685f4352fbSJeremy Fitzhardinge struct vm_struct *area; 16695f4352fbSJeremy Fitzhardinge 167023016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 167123016969SChristoph Lameter __builtin_return_address(0)); 16725f4352fbSJeremy Fitzhardinge if (area == NULL) 16735f4352fbSJeremy Fitzhardinge return NULL; 16745f4352fbSJeremy Fitzhardinge 16755f4352fbSJeremy Fitzhardinge /* 16765f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 16775f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm. 
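	 * The callback f() above is deliberately a no-op: merely walking
	 * the range with apply_to_page_range() forces allocation of all
	 * the intermediate page tables down to the pte level.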
16785f4352fbSJeremy Fitzhardinge */ 16795f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 16805f4352fbSJeremy Fitzhardinge area->size, f, NULL)) { 16815f4352fbSJeremy Fitzhardinge free_vm_area(area); 16825f4352fbSJeremy Fitzhardinge return NULL; 16835f4352fbSJeremy Fitzhardinge } 16845f4352fbSJeremy Fitzhardinge 16855f4352fbSJeremy Fitzhardinge /* Make sure the pagetables are constructed in process kernel 16865f4352fbSJeremy Fitzhardinge mappings */ 16875f4352fbSJeremy Fitzhardinge vmalloc_sync_all(); 16885f4352fbSJeremy Fitzhardinge 16895f4352fbSJeremy Fitzhardinge return area; 16905f4352fbSJeremy Fitzhardinge } 16915f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 16925f4352fbSJeremy Fitzhardinge 16935f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 16945f4352fbSJeremy Fitzhardinge { 16955f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 16965f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 16975f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 16985f4352fbSJeremy Fitzhardinge kfree(area); 16995f4352fbSJeremy Fitzhardinge } 17005f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 1701a10aa579SChristoph Lameter 1702a10aa579SChristoph Lameter 1703a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS 1704a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos) 1705a10aa579SChristoph Lameter { 1706a10aa579SChristoph Lameter loff_t n = *pos; 1707a10aa579SChristoph Lameter struct vm_struct *v; 1708a10aa579SChristoph Lameter 1709a10aa579SChristoph Lameter read_lock(&vmlist_lock); 1710a10aa579SChristoph Lameter v = vmlist; 1711a10aa579SChristoph Lameter while (n > 0 && v) { 1712a10aa579SChristoph Lameter n--; 1713a10aa579SChristoph Lameter v = v->next; 1714a10aa579SChristoph Lameter } 1715a10aa579SChristoph Lameter if (!n) 1716a10aa579SChristoph Lameter return v; 1717a10aa579SChristoph Lameter 1718a10aa579SChristoph Lameter return NULL; 1719a10aa579SChristoph Lameter 1720a10aa579SChristoph Lameter } 1721a10aa579SChristoph Lameter 1722a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos) 1723a10aa579SChristoph Lameter { 1724a10aa579SChristoph Lameter struct vm_struct *v = p; 1725a10aa579SChristoph Lameter 1726a10aa579SChristoph Lameter ++*pos; 1727a10aa579SChristoph Lameter return v->next; 1728a10aa579SChristoph Lameter } 1729a10aa579SChristoph Lameter 1730a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p) 1731a10aa579SChristoph Lameter { 1732a10aa579SChristoph Lameter read_unlock(&vmlist_lock); 1733a10aa579SChristoph Lameter } 1734a10aa579SChristoph Lameter 1735a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v) 1736a47a126aSEric Dumazet { 1737a47a126aSEric Dumazet if (NUMA_BUILD) { 1738a47a126aSEric Dumazet unsigned int nr, *counters = m->private; 1739a47a126aSEric Dumazet 1740a47a126aSEric Dumazet if (!counters) 1741a47a126aSEric Dumazet return; 1742a47a126aSEric Dumazet 1743a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 1744a47a126aSEric Dumazet 1745a47a126aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr++) 1746a47a126aSEric Dumazet counters[page_to_nid(v->pages[nr])]++; 1747a47a126aSEric Dumazet 1748a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY) 1749a47a126aSEric Dumazet if (counters[nr]) 1750a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]); 1751a47a126aSEric Dumazet } 1752a47a126aSEric Dumazet } 1753a47a126aSEric Dumazet 
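/*
 * An illustrative /proc/vmallocinfo line, reconstructed from the
 * format strings in s_show() below (the values are hypothetical):
 *
 *	0xf8c02000-0xf8c06000   16384 module_alloc+0x63/0x80 pages=3 vmalloc N0=3
 *
 * i.e. start-end, size in bytes, the recorded caller, the page count,
 * the type flags and the optional per-node page counts.
 */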
1754a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p) 1755a10aa579SChristoph Lameter { 1756a10aa579SChristoph Lameter struct vm_struct *v = p; 1757a10aa579SChristoph Lameter 1758a10aa579SChristoph Lameter seq_printf(m, "0x%p-0x%p %7ld", 1759a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 1760a10aa579SChristoph Lameter 176123016969SChristoph Lameter if (v->caller) { 17629c246247SHugh Dickins char buff[KSYM_SYMBOL_LEN]; 176323016969SChristoph Lameter 176423016969SChristoph Lameter seq_putc(m, ' '); 176523016969SChristoph Lameter sprint_symbol(buff, (unsigned long)v->caller); 176623016969SChristoph Lameter seq_puts(m, buff); 176723016969SChristoph Lameter } 176823016969SChristoph Lameter 1769a10aa579SChristoph Lameter if (v->nr_pages) 1770a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 1771a10aa579SChristoph Lameter 1772a10aa579SChristoph Lameter if (v->phys_addr) 1773a10aa579SChristoph Lameter seq_printf(m, " phys=%lx", v->phys_addr); 1774a10aa579SChristoph Lameter 1775a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 1776a10aa579SChristoph Lameter seq_printf(m, " ioremap"); 1777a10aa579SChristoph Lameter 1778a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 1779a10aa579SChristoph Lameter seq_printf(m, " vmalloc"); 1780a10aa579SChristoph Lameter 1781a10aa579SChristoph Lameter if (v->flags & VM_MAP) 1782a10aa579SChristoph Lameter seq_printf(m, " vmap"); 1783a10aa579SChristoph Lameter 1784a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 1785a10aa579SChristoph Lameter seq_printf(m, " user"); 1786a10aa579SChristoph Lameter 1787a10aa579SChristoph Lameter if (v->flags & VM_VPAGES) 1788a10aa579SChristoph Lameter seq_printf(m, " vpages"); 1789a10aa579SChristoph Lameter 1790a47a126aSEric Dumazet show_numa_info(m, v); 1791a10aa579SChristoph Lameter seq_putc(m, '\n'); 1792a10aa579SChristoph Lameter return 0; 1793a10aa579SChristoph Lameter } 1794a10aa579SChristoph Lameter 17955f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = { 1796a10aa579SChristoph Lameter .start = s_start, 1797a10aa579SChristoph Lameter .next = s_next, 1798a10aa579SChristoph Lameter .stop = s_stop, 1799a10aa579SChristoph Lameter .show = s_show, 1800a10aa579SChristoph Lameter }; 18015f6a6a9cSAlexey Dobriyan 18025f6a6a9cSAlexey Dobriyan static int vmalloc_open(struct inode *inode, struct file *file) 18035f6a6a9cSAlexey Dobriyan { 18045f6a6a9cSAlexey Dobriyan unsigned int *ptr = NULL; 18055f6a6a9cSAlexey Dobriyan int ret; 18065f6a6a9cSAlexey Dobriyan 18075f6a6a9cSAlexey Dobriyan if (NUMA_BUILD) 18085f6a6a9cSAlexey Dobriyan ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); 18095f6a6a9cSAlexey Dobriyan ret = seq_open(file, &vmalloc_op); 18105f6a6a9cSAlexey Dobriyan if (!ret) { 18115f6a6a9cSAlexey Dobriyan struct seq_file *m = file->private_data; 18125f6a6a9cSAlexey Dobriyan m->private = ptr; 18135f6a6a9cSAlexey Dobriyan } else 18145f6a6a9cSAlexey Dobriyan kfree(ptr); 18155f6a6a9cSAlexey Dobriyan return ret; 18165f6a6a9cSAlexey Dobriyan } 18175f6a6a9cSAlexey Dobriyan 18185f6a6a9cSAlexey Dobriyan static const struct file_operations proc_vmalloc_operations = { 18195f6a6a9cSAlexey Dobriyan .open = vmalloc_open, 18205f6a6a9cSAlexey Dobriyan .read = seq_read, 18215f6a6a9cSAlexey Dobriyan .llseek = seq_lseek, 18225f6a6a9cSAlexey Dobriyan .release = seq_release_private, 18235f6a6a9cSAlexey Dobriyan }; 18245f6a6a9cSAlexey Dobriyan 18255f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 18265f6a6a9cSAlexey Dobriyan { 
18275f6a6a9cSAlexey Dobriyan proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); 18285f6a6a9cSAlexey Dobriyan return 0; 18295f6a6a9cSAlexey Dobriyan } 18305f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 1831a10aa579SChristoph Lameter #endif 1832a10aa579SChristoph Lameter
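/*
 * An illustrative sketch (not part of this file) of the vmap()/vunmap()
 * pair documented above, with hypothetical 'pages', 'nr' and 'virt':
 *
 *	virt = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!virt)
 *		return -ENOMEM;
 *	...
 *	vunmap(virt);	(the pages themselves are not freed)
 */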