/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/bootmem.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>


/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
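
/*
 * Editor's note -- a minimal usage sketch (an aside, not kernel
 * documentation): the vunmap_*_range() walkers here only clear page table
 * entries and deliberately leave stale TLB entries behind, so callers can
 * batch the expensive flush. A complete teardown is therefore two steps,
 * as unmap_kernel_range() later in this file does:
 *
 *	vunmap_page_range(addr, addr + size);		// clear the ptes
 *	flush_tlb_kernel_range(addr, addr + size);	// then flush the TLB
 */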

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long start, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(start, end);

	if (unlikely(err))
		return err;
	return nr;
}
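
/*
 * Editor's note -- an illustrative sketch of the return convention above,
 * assuming a hypothetical caller that already owns the range
 * [addr, addr + nr*PAGE_SIZE) and an array "pages" of nr struct pages:
 *
 *	int ret = vmap_page_range(addr, addr + (nr << PAGE_SHIFT),
 *				  PAGE_KERNEL, pages);
 *	// ret < 0:  -EBUSY/-ENOMEM; the range may be partially populated
 *	// ret >= 0: number of pages actually mapped (nr on full success)
 *
 * map_vm_area() later in this file consumes the positive return value to
 * advance the caller's pages cursor.
 */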

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
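
/*
 * Editor's note -- a minimal usage sketch of the two helpers above:
 *
 *	void *p = vmalloc(PAGE_SIZE);
 *	if (p) {
 *		struct page *page = vmalloc_to_page(p);
 *		unsigned long pfn = vmalloc_to_pfn(p);
 *		// hand page/pfn to whoever needs the physical frame
 *		vfree(p);
 *	}
 *
 * Useful e.g. when a driver must hand individual physical pages of a
 * vmalloc()ed buffer to hardware or to an mmap implementation.
 */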

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}
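
/*
 * Editor's note: __find_vmap_area() keys the lookup on an exact va_start
 * match -- an interior address (va_start < addr < va_end) walks into the
 * right subtree and eventually returns NULL. That is sufficient for callers
 * such as free_unmap_vmap_area_addr() below, which always pass the base
 * address originally handed out by alloc_vmap_area().
 */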

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	if (addr + size - 1 < addr)
		goto overflow;

	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);
			if (addr + size - 1 < addr)
				goto overflow;

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
overflow:
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}
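
/*
 * Editor's note -- a minimal, hypothetical usage sketch of the pair above
 * (the real callers are new_vmap_block() and __get_vm_area_node() below):
 *
 *	struct vmap_area *va;
 *
 *	va = alloc_vmap_area(PAGE_SIZE, PAGE_SIZE,
 *			     VMALLOC_START, VMALLOC_END, -1, GFP_KERNEL);
 *	if (!IS_ERR(va)) {
 *		// va->va_start .. va->va_end is now reserved KVA,
 *		// with nothing mapped into it yet.
 *		free_vmap_area(va);	// give the range back (no unmap needed)
 *	}
 */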

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
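
/*
 * Editor's note -- worked example of the log scale above, assuming 4K pages:
 * with 1 online CPU, fls(1) == 1, so lazy_max_pages() allows 32MB worth of
 * pages (8192); with 16 CPUs, fls(16) == 5, giving 5 * 32MB == 160MB
 * (40960 pages) of lazily-freed KVA before a purge is forced. vmap_lazy_nr
 * above tracks the running total.
 */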

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}
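
/*
 * Editor's note: the start/end in-out parameters let a caller merge its own
 * pending flush range with whatever the purge finds, paying for a single
 * flush_tlb_kernel_range() in total. vm_unmap_aliases() below does exactly
 * that; the two wrappers above just pass an empty range (ULONG_MAX, 0).
 */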

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct {
			struct list_head free_list;
			struct list_head dirty_list;
		};
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
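
/*
 * Editor's note -- worked example: the index is the ordinal number of the
 * VMAP_BLOCK_SIZE-aligned window the address falls in, counted from the
 * window containing VMALLOC_START. E.g. with a hypothetical 4MB block size:
 *
 *	addr_to_vb_idx(va_start)			== some index i
 *	addr_to_vb_idx(va_start + PAGE_SIZE)		== i as well
 *	addr_to_vb_idx(va_start + VMAP_BLOCK_SIZE)	== i + 1
 *
 * Since blocks are allocated VMAP_BLOCK_SIZE-aligned (see new_vmap_block()
 * below), every address within a block maps to the same radix tree slot.
 */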

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);
	INIT_LIST_HEAD(&vb->dirty_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	spin_lock(&vb->vbq->lock);
	if (!list_empty(&vb->free_list))
		list_del(&vb->free_list);
	if (!list_empty(&vb->dirty_list))
		list_del(&vb->dirty_list);
	spin_unlock(&vb->vbq->lock);

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
	if (!vb->dirty) {
		spin_lock(&vb->vbq->lock);
		list_add(&vb->dirty_list, &vb->vbq->dirty);
		spin_unlock(&vb->vbq->lock);
	}
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
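
/*
 * Editor's note -- worked example of the sizing in vb_alloc()/vb_free():
 * requests are rounded to a power-of-two number of pages. With 4K pages,
 * vb_alloc(8192, GFP_KERNEL) computes get_order(8192) == 1 and claims 2
 * contiguous bits in alloc_map; the matching vb_free(addr, 8192) marks the
 * same 2 bits dirty rather than reusing them -- the space is only recycled
 * once the whole block is dirty and can be freed (and TLB-flushed) in one go.
 */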

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer, and so there might be some CPUs with TLB entries
 * still referencing that page (in addition to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
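
/*
 * Editor's note -- a minimal usage sketch, assuming the caller already owns
 * an array of nr pages (hypothetical names):
 *
 *	void *buf = vm_map_ram(pages, nr, -1, PAGE_KERNEL);
 *	if (buf) {
 *		// buf is a contiguous kernel mapping of the nr pages
 *		vm_unmap_ram(buf, nr);	// must pass the same count
 *	}
 *
 * For nr <= VMAP_MAX_ALLOC this stays on the cheap per-cpu block path;
 * larger requests fall back to a full vmap_area allocation.
 */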

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = alloc_bootmem(sizeof(struct vmap_area));
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}
	vmap_initialized = true;
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
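
/*
 * Editor's note -- a sketch of the intended calling pattern (vmap() below
 * is the canonical in-tree user). Note that map_vm_area() advances the
 * caller's page array cursor by the number of pages consumed:
 *
 *	struct vm_struct *area = get_vm_area(nr << PAGE_SHIFT, VM_MAP);
 *	struct page **tmp = pages;	// keep the original pointer intact
 *
 *	if (area && map_vm_area(area, PAGE_KERNEL, &tmp) == 0) {
 *		// area->addr now maps pages[0..nr-1], guard page excluded
 *	}
 */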

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       void *caller)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
				  caller);
}
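
/*
 * Editor's note -- worked example of the guard page accounting above:
 * __get_vm_area_node(4096, ...) page-aligns the request and then adds one
 * guard page, so area->size ends up 8192 with 4K pages. Consumers
 * consistently subtract it again: map_vm_area() maps up to
 * area->size - PAGE_SIZE, and remove_vm_area() takes PAGE_SIZE back off
 * before the area is reported freed.
 */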

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}
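
/*
 * Editor's note -- a minimal, hypothetical ioremap-style sketch: reserve
 * KVA without populating it, then tear it down again:
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *
 *	if (area) {
 *		// area->addr is page-aligned and nothing is mapped there yet;
 *		// an ioremap implementation would now install the ptes.
 *		struct vm_struct *vm = remove_vm_area(area->addr);
 *		kfree(vm);	// caller owns the returned descriptor
 *	}
 */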
11741da177e4SLinus Torvalds */ 1175b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 11761da177e4SLinus Torvalds { 1177db64fe02SNick Piggin struct vmap_area *va; 1178db64fe02SNick Piggin 1179db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 1180db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) { 1181db64fe02SNick Piggin struct vm_struct *vm = va->private; 1182db64fe02SNick Piggin struct vm_struct *tmp, **p; 1183cd52858cSNick Piggin 1184cd52858cSNick Piggin vmap_debug_free_range(va->va_start, va->va_end); 1185db64fe02SNick Piggin free_unmap_vmap_area(va); 1186db64fe02SNick Piggin vm->size -= PAGE_SIZE; 1187db64fe02SNick Piggin 11881da177e4SLinus Torvalds write_lock(&vmlist_lock); 1189db64fe02SNick Piggin for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next) 1190db64fe02SNick Piggin ; 1191db64fe02SNick Piggin *p = tmp->next; 11921da177e4SLinus Torvalds write_unlock(&vmlist_lock); 1193db64fe02SNick Piggin 1194db64fe02SNick Piggin return vm; 1195db64fe02SNick Piggin } 1196db64fe02SNick Piggin return NULL; 11971da177e4SLinus Torvalds } 11981da177e4SLinus Torvalds 1199b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 12001da177e4SLinus Torvalds { 12011da177e4SLinus Torvalds struct vm_struct *area; 12021da177e4SLinus Torvalds 12031da177e4SLinus Torvalds if (!addr) 12041da177e4SLinus Torvalds return; 12051da177e4SLinus Torvalds 12061da177e4SLinus Torvalds if ((PAGE_SIZE-1) & (unsigned long)addr) { 12074c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr); 12081da177e4SLinus Torvalds return; 12091da177e4SLinus Torvalds } 12101da177e4SLinus Torvalds 12111da177e4SLinus Torvalds area = remove_vm_area(addr); 12121da177e4SLinus Torvalds if (unlikely(!area)) { 12134c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 12141da177e4SLinus Torvalds addr); 12151da177e4SLinus Torvalds return; 12161da177e4SLinus Torvalds } 12171da177e4SLinus Torvalds 12189a11b49aSIngo Molnar debug_check_no_locks_freed(addr, area->size); 12193ac7fe5aSThomas Gleixner debug_check_no_obj_freed(addr, area->size); 12209a11b49aSIngo Molnar 12211da177e4SLinus Torvalds if (deallocate_pages) { 12221da177e4SLinus Torvalds int i; 12231da177e4SLinus Torvalds 12241da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1225bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 1226bf53d6f8SChristoph Lameter 1227bf53d6f8SChristoph Lameter BUG_ON(!page); 1228bf53d6f8SChristoph Lameter __free_page(page); 12291da177e4SLinus Torvalds } 12301da177e4SLinus Torvalds 12318757d5faSJan Kiszka if (area->flags & VM_VPAGES) 12321da177e4SLinus Torvalds vfree(area->pages); 12331da177e4SLinus Torvalds else 12341da177e4SLinus Torvalds kfree(area->pages); 12351da177e4SLinus Torvalds } 12361da177e4SLinus Torvalds 12371da177e4SLinus Torvalds kfree(area); 12381da177e4SLinus Torvalds return; 12391da177e4SLinus Torvalds } 12401da177e4SLinus Torvalds 12411da177e4SLinus Torvalds /** 12421da177e4SLinus Torvalds * vfree - release memory allocated by vmalloc() 12431da177e4SLinus Torvalds * @addr: memory base address 12441da177e4SLinus Torvalds * 1245183ff22bSSimon Arlott * Free the virtually contiguous memory area starting at @addr, as 124680e93effSPekka Enberg * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is 124780e93effSPekka Enberg * NULL, no operation is performed. 12481da177e4SLinus Torvalds * 124980e93effSPekka Enberg * Must not be called in interrupt context.
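 *
 * Typical allocate/release pairing (the buffer and element type are
 * illustrative only):
 *
 *	void *buf = vmalloc(nr_entries * sizeof(struct entry));
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);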
12501da177e4SLinus Torvalds */ 1251b3bdda02SChristoph Lameter void vfree(const void *addr) 12521da177e4SLinus Torvalds { 12531da177e4SLinus Torvalds BUG_ON(in_interrupt()); 12541da177e4SLinus Torvalds __vunmap(addr, 1); 12551da177e4SLinus Torvalds } 12561da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 12571da177e4SLinus Torvalds 12581da177e4SLinus Torvalds /** 12591da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 12601da177e4SLinus Torvalds * @addr: memory base address 12611da177e4SLinus Torvalds * 12621da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 12631da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 12641da177e4SLinus Torvalds * 126580e93effSPekka Enberg * Must not be called in interrupt context. 12661da177e4SLinus Torvalds */ 1267b3bdda02SChristoph Lameter void vunmap(const void *addr) 12681da177e4SLinus Torvalds { 12691da177e4SLinus Torvalds BUG_ON(in_interrupt()); 12701da177e4SLinus Torvalds __vunmap(addr, 0); 12711da177e4SLinus Torvalds } 12721da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 12731da177e4SLinus Torvalds 12741da177e4SLinus Torvalds /** 12751da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 12761da177e4SLinus Torvalds * @pages: array of page pointers 12771da177e4SLinus Torvalds * @count: number of pages to map 12781da177e4SLinus Torvalds * @flags: vm_area->flags 12791da177e4SLinus Torvalds * @prot: page protection for the mapping 12801da177e4SLinus Torvalds * 12811da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 12821da177e4SLinus Torvalds * space. 12831da177e4SLinus Torvalds */ 12841da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 12851da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 12861da177e4SLinus Torvalds { 12871da177e4SLinus Torvalds struct vm_struct *area; 12881da177e4SLinus Torvalds 12891da177e4SLinus Torvalds if (count > num_physpages) 12901da177e4SLinus Torvalds return NULL; 12911da177e4SLinus Torvalds 129223016969SChristoph Lameter area = get_vm_area_caller((count << PAGE_SHIFT), flags, 129323016969SChristoph Lameter __builtin_return_address(0)); 12941da177e4SLinus Torvalds if (!area) 12951da177e4SLinus Torvalds return NULL; 129623016969SChristoph Lameter 12971da177e4SLinus Torvalds if (map_vm_area(area, prot, &pages)) { 12981da177e4SLinus Torvalds vunmap(area->addr); 12991da177e4SLinus Torvalds return NULL; 13001da177e4SLinus Torvalds } 13011da177e4SLinus Torvalds 13021da177e4SLinus Torvalds return area->addr; 13031da177e4SLinus Torvalds } 13041da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 13051da177e4SLinus Torvalds 1306db64fe02SNick Piggin static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, 1307db64fe02SNick Piggin int node, void *caller); 1308e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 130923016969SChristoph Lameter pgprot_t prot, int node, void *caller) 13101da177e4SLinus Torvalds { 13111da177e4SLinus Torvalds struct page **pages; 13121da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 13131da177e4SLinus Torvalds 13141da177e4SLinus Torvalds nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT; 13151da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 13161da177e4SLinus Torvalds 13171da177e4SLinus Torvalds area->nr_pages = nr_pages; 13181da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
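 * The nested __vmalloc_node() call below allocates only the page-pointer
 * array, which is smaller than the area it describes by a factor of about
 * PAGE_SIZE / sizeof(struct page *), so every level of nesting shrinks the
 * request by that factor and the array quickly becomes small enough to be
 * served by kmalloc_node() instead.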
*/ 13198757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 132094f6030cSChristoph Lameter pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO, 132123016969SChristoph Lameter PAGE_KERNEL, node, caller); 13228757d5faSJan Kiszka area->flags |= VM_VPAGES; 1323286e1ea3SAndrew Morton } else { 1324286e1ea3SAndrew Morton pages = kmalloc_node(array_size, 13256cb06229SChristoph Lameter (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO, 1326286e1ea3SAndrew Morton node); 1327286e1ea3SAndrew Morton } 13281da177e4SLinus Torvalds area->pages = pages; 132923016969SChristoph Lameter area->caller = caller; 13301da177e4SLinus Torvalds if (!area->pages) { 13311da177e4SLinus Torvalds remove_vm_area(area->addr); 13321da177e4SLinus Torvalds kfree(area); 13331da177e4SLinus Torvalds return NULL; 13341da177e4SLinus Torvalds } 13351da177e4SLinus Torvalds 13361da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1337bf53d6f8SChristoph Lameter struct page *page; 1338bf53d6f8SChristoph Lameter 1339930fc45aSChristoph Lameter if (node < 0) 1340bf53d6f8SChristoph Lameter page = alloc_page(gfp_mask); 1341930fc45aSChristoph Lameter else 1342bf53d6f8SChristoph Lameter page = alloc_pages_node(node, gfp_mask, 0); 1343bf53d6f8SChristoph Lameter 1344bf53d6f8SChristoph Lameter if (unlikely(!page)) { 13451da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 13461da177e4SLinus Torvalds area->nr_pages = i; 13471da177e4SLinus Torvalds goto fail; 13481da177e4SLinus Torvalds } 1349bf53d6f8SChristoph Lameter area->pages[i] = page; 13501da177e4SLinus Torvalds } 13511da177e4SLinus Torvalds 13521da177e4SLinus Torvalds if (map_vm_area(area, prot, &pages)) 13531da177e4SLinus Torvalds goto fail; 13541da177e4SLinus Torvalds return area->addr; 13551da177e4SLinus Torvalds 13561da177e4SLinus Torvalds fail: 13571da177e4SLinus Torvalds vfree(area->addr); 13581da177e4SLinus Torvalds return NULL; 13591da177e4SLinus Torvalds } 13601da177e4SLinus Torvalds 1361930fc45aSChristoph Lameter void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) 1362930fc45aSChristoph Lameter { 136323016969SChristoph Lameter return __vmalloc_area_node(area, gfp_mask, prot, -1, 136423016969SChristoph Lameter __builtin_return_address(0)); 1365930fc45aSChristoph Lameter } 1366930fc45aSChristoph Lameter 13671da177e4SLinus Torvalds /** 1368930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 13691da177e4SLinus Torvalds * @size: allocation size 13701da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 13711da177e4SLinus Torvalds * @prot: protection mask for the allocated pages 1372d44e0780SRandy Dunlap * @node: node to use for allocation or -1 1373c85d194bSRandy Dunlap * @caller: caller's return address 13741da177e4SLinus Torvalds * 13751da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 13761da177e4SLinus Torvalds * allocator with @gfp_mask flags. Map them into contiguous 13771da177e4SLinus Torvalds * kernel virtual space, using a pagetable protection of @prot. 
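 *
 * Callers normally reach this through one of the exported wrappers; a
 * sketch using __vmalloc() (the size is illustrative):
 *
 *	void *p = __vmalloc(64 * 1024, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);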
13781da177e4SLinus Torvalds */ 1379b221385bSAdrian Bunk static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, 138023016969SChristoph Lameter int node, void *caller) 13811da177e4SLinus Torvalds { 13821da177e4SLinus Torvalds struct vm_struct *area; 13831da177e4SLinus Torvalds 13841da177e4SLinus Torvalds size = PAGE_ALIGN(size); 13851da177e4SLinus Torvalds if (!size || (size >> PAGE_SHIFT) > num_physpages) 13861da177e4SLinus Torvalds return NULL; 13871da177e4SLinus Torvalds 138823016969SChristoph Lameter area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END, 138923016969SChristoph Lameter node, gfp_mask, caller); 139023016969SChristoph Lameter 13911da177e4SLinus Torvalds if (!area) 13921da177e4SLinus Torvalds return NULL; 13931da177e4SLinus Torvalds 139423016969SChristoph Lameter return __vmalloc_area_node(area, gfp_mask, prot, node, caller); 13951da177e4SLinus Torvalds } 13961da177e4SLinus Torvalds 1397930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 1398930fc45aSChristoph Lameter { 139923016969SChristoph Lameter return __vmalloc_node(size, gfp_mask, prot, -1, 140023016969SChristoph Lameter __builtin_return_address(0)); 1401930fc45aSChristoph Lameter } 14021da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 14031da177e4SLinus Torvalds 14041da177e4SLinus Torvalds /** 14051da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 14061da177e4SLinus Torvalds * @size: allocation size 14071da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 14081da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 14091da177e4SLinus Torvalds * 1410c1c8897fSMichael Opdenacker * For tight control over the page level allocator and protection flags 14111da177e4SLinus Torvalds * use __vmalloc() instead. 14121da177e4SLinus Torvalds */ 14131da177e4SLinus Torvalds void *vmalloc(unsigned long size) 14141da177e4SLinus Torvalds { 141523016969SChristoph Lameter return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 141623016969SChristoph Lameter -1, __builtin_return_address(0)); 14171da177e4SLinus Torvalds } 14181da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 14191da177e4SLinus Torvalds 1420930fc45aSChristoph Lameter /** 1421ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 142283342314SNick Piggin * @size: allocation size 1423ead04089SRolf Eike Beer * 1424ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 1425ead04089SRolf Eike Beer * without leaking data.
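 *
 * Sketch of the usual pattern together with remap_vmalloc_range(); the
 * device structure and mmap handler shown are hypothetical:
 *
 *	dev->buf = vmalloc_user(buf_size);
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, dev->buf, 0);
 *	}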
142683342314SNick Piggin */ 142783342314SNick Piggin void *vmalloc_user(unsigned long size) 142883342314SNick Piggin { 142983342314SNick Piggin struct vm_struct *area; 143083342314SNick Piggin void *ret; 143183342314SNick Piggin 143284877848SGlauber Costa ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 143384877848SGlauber Costa PAGE_KERNEL, -1, __builtin_return_address(0)); 14342b4ac44eSEric Dumazet if (ret) { 1435db64fe02SNick Piggin area = find_vm_area(ret); 143683342314SNick Piggin area->flags |= VM_USERMAP; 14372b4ac44eSEric Dumazet } 143883342314SNick Piggin return ret; 143983342314SNick Piggin } 144083342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 144183342314SNick Piggin 144283342314SNick Piggin /** 1443930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 1444930fc45aSChristoph Lameter * @size: allocation size 1445d44e0780SRandy Dunlap * @node: numa node 1446930fc45aSChristoph Lameter * 1447930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 1448930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 1449930fc45aSChristoph Lameter * 1450c1c8897fSMichael Opdenacker * For tight control over the page level allocator and protection flags 1451930fc45aSChristoph Lameter * use __vmalloc() instead. 1452930fc45aSChristoph Lameter */ 1453930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 1454930fc45aSChristoph Lameter { 145523016969SChristoph Lameter return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 145623016969SChristoph Lameter node, __builtin_return_address(0)); 1457930fc45aSChristoph Lameter } 1458930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 1459930fc45aSChristoph Lameter 14604dc3b16bSPavel Pisa #ifndef PAGE_KERNEL_EXEC 14614dc3b16bSPavel Pisa # define PAGE_KERNEL_EXEC PAGE_KERNEL 14624dc3b16bSPavel Pisa #endif 14634dc3b16bSPavel Pisa 14641da177e4SLinus Torvalds /** 14651da177e4SLinus Torvalds * vmalloc_exec - allocate virtually contiguous, executable memory 14661da177e4SLinus Torvalds * @size: allocation size 14671da177e4SLinus Torvalds * 14681da177e4SLinus Torvalds * Kernel-internal function to allocate enough pages to cover @size 14691da177e4SLinus Torvalds * from the page level allocator and map them into contiguous and 14701da177e4SLinus Torvalds * executable kernel virtual space. 14711da177e4SLinus Torvalds * 1472c1c8897fSMichael Opdenacker * For tight control over the page level allocator and protection flags 14731da177e4SLinus Torvalds * use __vmalloc() instead.
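 *
 * Sketch of a loader-style caller (the names are illustrative, and any
 * cache maintenance an architecture may require is omitted):
 *
 *	void *text = vmalloc_exec(image_size);
 *	if (text)
 *		memcpy(text, image, image_size);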
14741da177e4SLinus Torvalds */ 14751da177e4SLinus Torvalds 14761da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size) 14771da177e4SLinus Torvalds { 147884877848SGlauber Costa return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, 147984877848SGlauber Costa -1, __builtin_return_address(0)); 14801da177e4SLinus Torvalds } 14811da177e4SLinus Torvalds 14820d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 14837ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 14840d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 14857ac674f5SBenjamin Herrenschmidt #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 14860d08e0d3SAndi Kleen #else 14870d08e0d3SAndi Kleen #define GFP_VMALLOC32 GFP_KERNEL 14880d08e0d3SAndi Kleen #endif 14890d08e0d3SAndi Kleen 14901da177e4SLinus Torvalds /** 14911da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 14921da177e4SLinus Torvalds * @size: allocation size 14931da177e4SLinus Torvalds * 14941da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 14951da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 14961da177e4SLinus Torvalds */ 14971da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 14981da177e4SLinus Torvalds { 149984877848SGlauber Costa return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL, 150084877848SGlauber Costa -1, __builtin_return_address(0)); 15011da177e4SLinus Torvalds } 15021da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 15031da177e4SLinus Torvalds 150483342314SNick Piggin /** 1505ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 150683342314SNick Piggin * @size: allocation size 1507ead04089SRolf Eike Beer * 1508ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 1509ead04089SRolf Eike Beer * mapped to userspace without leaking data.
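 *
 * Sketch for a driver whose device can only address 32bit physical memory
 * (frame_size is a hypothetical driver parameter):
 *
 *	void *frame = vmalloc_32_user(frame_size);
 *	if (!frame)
 *		return -ENOMEM;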
151083342314SNick Piggin */ 151183342314SNick Piggin void *vmalloc_32_user(unsigned long size) 151283342314SNick Piggin { 151383342314SNick Piggin struct vm_struct *area; 151483342314SNick Piggin void *ret; 151583342314SNick Piggin 151684877848SGlauber Costa ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 151784877848SGlauber Costa -1, __builtin_return_address(0)); 15182b4ac44eSEric Dumazet if (ret) { 1519db64fe02SNick Piggin area = find_vm_area(ret); 152083342314SNick Piggin area->flags |= VM_USERMAP; 15212b4ac44eSEric Dumazet } 152283342314SNick Piggin return ret; 152383342314SNick Piggin } 152483342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 152583342314SNick Piggin 15261da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count) 15271da177e4SLinus Torvalds { 15281da177e4SLinus Torvalds struct vm_struct *tmp; 15291da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 15301da177e4SLinus Torvalds unsigned long n; 15311da177e4SLinus Torvalds 15321da177e4SLinus Torvalds /* Don't allow overflow */ 15331da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 15341da177e4SLinus Torvalds count = -(unsigned long) addr; 15351da177e4SLinus Torvalds 15361da177e4SLinus Torvalds read_lock(&vmlist_lock); 15371da177e4SLinus Torvalds for (tmp = vmlist; tmp; tmp = tmp->next) { 15381da177e4SLinus Torvalds vaddr = (char *) tmp->addr; 15391da177e4SLinus Torvalds if (addr >= vaddr + tmp->size - PAGE_SIZE) 15401da177e4SLinus Torvalds continue; 15411da177e4SLinus Torvalds while (addr < vaddr) { 15421da177e4SLinus Torvalds if (count == 0) 15431da177e4SLinus Torvalds goto finished; 15441da177e4SLinus Torvalds *buf = '\0'; 15451da177e4SLinus Torvalds buf++; 15461da177e4SLinus Torvalds addr++; 15471da177e4SLinus Torvalds count--; 15481da177e4SLinus Torvalds } 15491da177e4SLinus Torvalds n = vaddr + tmp->size - PAGE_SIZE - addr; 15501da177e4SLinus Torvalds do { 15511da177e4SLinus Torvalds if (count == 0) 15521da177e4SLinus Torvalds goto finished; 15531da177e4SLinus Torvalds *buf = *addr; 15541da177e4SLinus Torvalds buf++; 15551da177e4SLinus Torvalds addr++; 15561da177e4SLinus Torvalds count--; 15571da177e4SLinus Torvalds } while (--n > 0); 15581da177e4SLinus Torvalds } 15591da177e4SLinus Torvalds finished: 15601da177e4SLinus Torvalds read_unlock(&vmlist_lock); 15611da177e4SLinus Torvalds return buf - buf_start; 15621da177e4SLinus Torvalds } 15631da177e4SLinus Torvalds 15641da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count) 15651da177e4SLinus Torvalds { 15661da177e4SLinus Torvalds struct vm_struct *tmp; 15671da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 15681da177e4SLinus Torvalds unsigned long n; 15691da177e4SLinus Torvalds 15701da177e4SLinus Torvalds /* Don't allow overflow */ 15711da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 15721da177e4SLinus Torvalds count = -(unsigned long) addr; 15731da177e4SLinus Torvalds 15741da177e4SLinus Torvalds read_lock(&vmlist_lock); 15751da177e4SLinus Torvalds for (tmp = vmlist; tmp; tmp = tmp->next) { 15761da177e4SLinus Torvalds vaddr = (char *) tmp->addr; 15771da177e4SLinus Torvalds if (addr >= vaddr + tmp->size - PAGE_SIZE) 15781da177e4SLinus Torvalds continue; 15791da177e4SLinus Torvalds while (addr < vaddr) { 15801da177e4SLinus Torvalds if (count == 0) 15811da177e4SLinus Torvalds goto finished; 15821da177e4SLinus Torvalds buf++; 15831da177e4SLinus Torvalds addr++; 15841da177e4SLinus Torvalds count--; 15851da177e4SLinus Torvalds } 15861da177e4SLinus Torvalds n = vaddr + 
tmp->size - PAGE_SIZE - addr; 15871da177e4SLinus Torvalds do { 15881da177e4SLinus Torvalds if (count == 0) 15891da177e4SLinus Torvalds goto finished; 15901da177e4SLinus Torvalds *addr = *buf; 15911da177e4SLinus Torvalds buf++; 15921da177e4SLinus Torvalds addr++; 15931da177e4SLinus Torvalds count--; 15941da177e4SLinus Torvalds } while (--n > 0); 15951da177e4SLinus Torvalds } 15961da177e4SLinus Torvalds finished: 15971da177e4SLinus Torvalds read_unlock(&vmlist_lock); 15981da177e4SLinus Torvalds return buf - buf_start; 15991da177e4SLinus Torvalds } 160083342314SNick Piggin 160183342314SNick Piggin /** 160283342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 160383342314SNick Piggin * @vma: vma to cover (map full range of vma) 160483342314SNick Piggin * @addr: vmalloc memory 160583342314SNick Piggin * @pgoff: number of pages into addr before first page to map 16067682486bSRandy Dunlap * 16077682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 160883342314SNick Piggin * 160983342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 161083342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 161183342314SNick Piggin * that criterion isn't met. 161283342314SNick Piggin * 161372fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 161483342314SNick Piggin */ 161583342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 161683342314SNick Piggin unsigned long pgoff) 161783342314SNick Piggin { 161883342314SNick Piggin struct vm_struct *area; 161983342314SNick Piggin unsigned long uaddr = vma->vm_start; 162083342314SNick Piggin unsigned long usize = vma->vm_end - vma->vm_start; 162183342314SNick Piggin 162283342314SNick Piggin if ((PAGE_SIZE-1) & (unsigned long)addr) 162383342314SNick Piggin return -EINVAL; 162483342314SNick Piggin 1625db64fe02SNick Piggin area = find_vm_area(addr); 162683342314SNick Piggin if (!area) 1627db64fe02SNick Piggin return -EINVAL; 162883342314SNick Piggin 162983342314SNick Piggin if (!(area->flags & VM_USERMAP)) 1630db64fe02SNick Piggin return -EINVAL; 163183342314SNick Piggin 163283342314SNick Piggin if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE) 1633db64fe02SNick Piggin return -EINVAL; 163483342314SNick Piggin 163583342314SNick Piggin addr += pgoff << PAGE_SHIFT; 163683342314SNick Piggin do { 163783342314SNick Piggin struct page *page = vmalloc_to_page(addr); 1638db64fe02SNick Piggin int ret; 1639db64fe02SNick Piggin 164083342314SNick Piggin ret = vm_insert_page(vma, uaddr, page); 164183342314SNick Piggin if (ret) 164283342314SNick Piggin return ret; 164383342314SNick Piggin 164483342314SNick Piggin uaddr += PAGE_SIZE; 164583342314SNick Piggin addr += PAGE_SIZE; 164683342314SNick Piggin usize -= PAGE_SIZE; 164783342314SNick Piggin } while (usize > 0); 164883342314SNick Piggin 164983342314SNick Piggin /* Prevent "things" like memory migration? VM_flags need a cleanup... */ 165083342314SNick Piggin vma->vm_flags |= VM_RESERVED; 165183342314SNick Piggin 1652db64fe02SNick Piggin return 0; 165383342314SNick Piggin } 165483342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 165583342314SNick Piggin 16561eeb66a1SChristoph Hellwig /* 16571eeb66a1SChristoph Hellwig * Implement a stub for vmalloc_sync_all() if the architecture chose not to 16581eeb66a1SChristoph Hellwig * have one.
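 * Architectures that keep per-process copies of the kernel page tables
 * (32-bit x86, for instance) provide a real implementation that propagates
 * vmalloc-area page-table entries into every process; the empty weak
 * definition below serves everyone else.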
16591eeb66a1SChristoph Hellwig */ 16601eeb66a1SChristoph Hellwig void __attribute__((weak)) vmalloc_sync_all(void) 16611eeb66a1SChristoph Hellwig { 16621eeb66a1SChristoph Hellwig } 16635f4352fbSJeremy Fitzhardinge 16645f4352fbSJeremy Fitzhardinge 16652f569afdSMartin Schwidefsky static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 16665f4352fbSJeremy Fitzhardinge { 16675f4352fbSJeremy Fitzhardinge /* apply_to_page_range() does all the hard work. */ 16685f4352fbSJeremy Fitzhardinge return 0; 16695f4352fbSJeremy Fitzhardinge } 16705f4352fbSJeremy Fitzhardinge 16715f4352fbSJeremy Fitzhardinge /** 16725f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 16735f4352fbSJeremy Fitzhardinge * @size: size of the area 16747682486bSRandy Dunlap * 16757682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 16765f4352fbSJeremy Fitzhardinge * 16775f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 16785f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 16795f4352fbSJeremy Fitzhardinge * are created. If the kernel address space is not shared 16805f4352fbSJeremy Fitzhardinge * between processes, it syncs the pagetable across all 16815f4352fbSJeremy Fitzhardinge * processes. 16825f4352fbSJeremy Fitzhardinge */ 16835f4352fbSJeremy Fitzhardinge struct vm_struct *alloc_vm_area(size_t size) 16845f4352fbSJeremy Fitzhardinge { 16855f4352fbSJeremy Fitzhardinge struct vm_struct *area; 16865f4352fbSJeremy Fitzhardinge 168723016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 168823016969SChristoph Lameter __builtin_return_address(0)); 16895f4352fbSJeremy Fitzhardinge if (area == NULL) 16905f4352fbSJeremy Fitzhardinge return NULL; 16915f4352fbSJeremy Fitzhardinge 16925f4352fbSJeremy Fitzhardinge /* 16935f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 16945f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm. 
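 * Touching every pte through the no-op callback f() is what forces
 * apply_to_page_range() to allocate any missing intermediate page tables.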
16955f4352fbSJeremy Fitzhardinge */ 16965f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 16975f4352fbSJeremy Fitzhardinge area->size, f, NULL)) { 16985f4352fbSJeremy Fitzhardinge free_vm_area(area); 16995f4352fbSJeremy Fitzhardinge return NULL; 17005f4352fbSJeremy Fitzhardinge } 17015f4352fbSJeremy Fitzhardinge 17025f4352fbSJeremy Fitzhardinge /* Make sure the pagetables are constructed in process kernel 17035f4352fbSJeremy Fitzhardinge mappings */ 17045f4352fbSJeremy Fitzhardinge vmalloc_sync_all(); 17055f4352fbSJeremy Fitzhardinge 17065f4352fbSJeremy Fitzhardinge return area; 17075f4352fbSJeremy Fitzhardinge } 17085f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 17095f4352fbSJeremy Fitzhardinge 17105f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 17115f4352fbSJeremy Fitzhardinge { 17125f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 17135f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 17145f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 17155f4352fbSJeremy Fitzhardinge kfree(area); 17165f4352fbSJeremy Fitzhardinge } 17175f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 1718a10aa579SChristoph Lameter 1719a10aa579SChristoph Lameter 1720a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS 1721a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos) 1722a10aa579SChristoph Lameter { 1723a10aa579SChristoph Lameter loff_t n = *pos; 1724a10aa579SChristoph Lameter struct vm_struct *v; 1725a10aa579SChristoph Lameter 1726a10aa579SChristoph Lameter read_lock(&vmlist_lock); 1727a10aa579SChristoph Lameter v = vmlist; 1728a10aa579SChristoph Lameter while (n > 0 && v) { 1729a10aa579SChristoph Lameter n--; 1730a10aa579SChristoph Lameter v = v->next; 1731a10aa579SChristoph Lameter } 1732a10aa579SChristoph Lameter if (!n) 1733a10aa579SChristoph Lameter return v; 1734a10aa579SChristoph Lameter 1735a10aa579SChristoph Lameter return NULL; 1736a10aa579SChristoph Lameter 1737a10aa579SChristoph Lameter } 1738a10aa579SChristoph Lameter 1739a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos) 1740a10aa579SChristoph Lameter { 1741a10aa579SChristoph Lameter struct vm_struct *v = p; 1742a10aa579SChristoph Lameter 1743a10aa579SChristoph Lameter ++*pos; 1744a10aa579SChristoph Lameter return v->next; 1745a10aa579SChristoph Lameter } 1746a10aa579SChristoph Lameter 1747a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p) 1748a10aa579SChristoph Lameter { 1749a10aa579SChristoph Lameter read_unlock(&vmlist_lock); 1750a10aa579SChristoph Lameter } 1751a10aa579SChristoph Lameter 1752a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v) 1753a47a126aSEric Dumazet { 1754a47a126aSEric Dumazet if (NUMA_BUILD) { 1755a47a126aSEric Dumazet unsigned int nr, *counters = m->private; 1756a47a126aSEric Dumazet 1757a47a126aSEric Dumazet if (!counters) 1758a47a126aSEric Dumazet return; 1759a47a126aSEric Dumazet 1760a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 1761a47a126aSEric Dumazet 1762a47a126aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr++) 1763a47a126aSEric Dumazet counters[page_to_nid(v->pages[nr])]++; 1764a47a126aSEric Dumazet 1765a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY) 1766a47a126aSEric Dumazet if (counters[nr]) 1767a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]); 1768a47a126aSEric Dumazet } 1769a47a126aSEric Dumazet } 1770a47a126aSEric Dumazet 
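/*
 * For reference, a /proc/vmallocinfo line as s_show() below formats it
 * (the address range, size and caller are illustrative only):
 *
 *	0xf8936000-0xf893f000   36864 alloc_large_system_hash+0x127/0x246 pages=8 vmalloc N0=8
 */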
1771a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p) 1772a10aa579SChristoph Lameter { 1773a10aa579SChristoph Lameter struct vm_struct *v = p; 1774a10aa579SChristoph Lameter 1775a10aa579SChristoph Lameter seq_printf(m, "0x%p-0x%p %7ld", 1776a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 1777a10aa579SChristoph Lameter 177823016969SChristoph Lameter if (v->caller) { 17799c246247SHugh Dickins char buff[KSYM_SYMBOL_LEN]; 178023016969SChristoph Lameter 178123016969SChristoph Lameter seq_putc(m, ' '); 178223016969SChristoph Lameter sprint_symbol(buff, (unsigned long)v->caller); 178323016969SChristoph Lameter seq_puts(m, buff); 178423016969SChristoph Lameter } 178523016969SChristoph Lameter 1786a10aa579SChristoph Lameter if (v->nr_pages) 1787a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 1788a10aa579SChristoph Lameter 1789a10aa579SChristoph Lameter if (v->phys_addr) 1790a10aa579SChristoph Lameter seq_printf(m, " phys=%lx", v->phys_addr); 1791a10aa579SChristoph Lameter 1792a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 1793a10aa579SChristoph Lameter seq_printf(m, " ioremap"); 1794a10aa579SChristoph Lameter 1795a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 1796a10aa579SChristoph Lameter seq_printf(m, " vmalloc"); 1797a10aa579SChristoph Lameter 1798a10aa579SChristoph Lameter if (v->flags & VM_MAP) 1799a10aa579SChristoph Lameter seq_printf(m, " vmap"); 1800a10aa579SChristoph Lameter 1801a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 1802a10aa579SChristoph Lameter seq_printf(m, " user"); 1803a10aa579SChristoph Lameter 1804a10aa579SChristoph Lameter if (v->flags & VM_VPAGES) 1805a10aa579SChristoph Lameter seq_printf(m, " vpages"); 1806a10aa579SChristoph Lameter 1807a47a126aSEric Dumazet show_numa_info(m, v); 1808a10aa579SChristoph Lameter seq_putc(m, '\n'); 1809a10aa579SChristoph Lameter return 0; 1810a10aa579SChristoph Lameter } 1811a10aa579SChristoph Lameter 18125f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = { 1813a10aa579SChristoph Lameter .start = s_start, 1814a10aa579SChristoph Lameter .next = s_next, 1815a10aa579SChristoph Lameter .stop = s_stop, 1816a10aa579SChristoph Lameter .show = s_show, 1817a10aa579SChristoph Lameter }; 18185f6a6a9cSAlexey Dobriyan 18195f6a6a9cSAlexey Dobriyan static int vmalloc_open(struct inode *inode, struct file *file) 18205f6a6a9cSAlexey Dobriyan { 18215f6a6a9cSAlexey Dobriyan unsigned int *ptr = NULL; 18225f6a6a9cSAlexey Dobriyan int ret; 18235f6a6a9cSAlexey Dobriyan 18245f6a6a9cSAlexey Dobriyan if (NUMA_BUILD) 18255f6a6a9cSAlexey Dobriyan ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); 18265f6a6a9cSAlexey Dobriyan ret = seq_open(file, &vmalloc_op); 18275f6a6a9cSAlexey Dobriyan if (!ret) { 18285f6a6a9cSAlexey Dobriyan struct seq_file *m = file->private_data; 18295f6a6a9cSAlexey Dobriyan m->private = ptr; 18305f6a6a9cSAlexey Dobriyan } else 18315f6a6a9cSAlexey Dobriyan kfree(ptr); 18325f6a6a9cSAlexey Dobriyan return ret; 18335f6a6a9cSAlexey Dobriyan } 18345f6a6a9cSAlexey Dobriyan 18355f6a6a9cSAlexey Dobriyan static const struct file_operations proc_vmalloc_operations = { 18365f6a6a9cSAlexey Dobriyan .open = vmalloc_open, 18375f6a6a9cSAlexey Dobriyan .read = seq_read, 18385f6a6a9cSAlexey Dobriyan .llseek = seq_lseek, 18395f6a6a9cSAlexey Dobriyan .release = seq_release_private, 18405f6a6a9cSAlexey Dobriyan }; 18415f6a6a9cSAlexey Dobriyan 18425f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 18435f6a6a9cSAlexey Dobriyan { 
18445f6a6a9cSAlexey Dobriyan proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); 18455f6a6a9cSAlexey Dobriyan return 0; 18465f6a6a9cSAlexey Dobriyan } 18475f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 1848a10aa579SChristoph Lameter #endif 1849a10aa579SChristoph Lameter