/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_clear_huge(p4d))
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
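
/*
 * Illustrative sketch (not part of this file): for a mapping set up over a
 * "pages" array as described above, vmalloc_to_page() walks the kernel page
 * tables back to the backing struct page, so vmalloc_to_page(addr + N *
 * PAGE_SIZE) is expected to return pages[N].  The function and variable
 * names below are made up for the example.
 */
#if 0
static void example_vmalloc_to_page(void)
{
	void *buf = vmalloc(4 * PAGE_SIZE);	/* four virtually contiguous pages */
	struct page *second;
	unsigned long pfn;

	if (!buf)
		return;

	/* struct page backing the second page of the area */
	second = vmalloc_to_page(buf + PAGE_SIZE);
	/* same walk, reduced to a physical page frame number */
	pfn = vmalloc_to_pfn(buf + PAGE_SIZE);

	pr_info("page %p maps pfn %lu\n", second, pfn);
	vfree(buf);
}
#endif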

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	might_sleep();

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_next_entry(first, list);
	}

found:
	/*
	 * Check the calculated address against vstart as well,
	 * because it can be 0 due to a big align request.
	 */
	if (addr + size > vend || addr < vstart)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
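
/*
 * Illustrative sketch (not part of this file): a subsystem that caches its
 * own vmap ranges can register a purge notifier so it gets a chance to
 * release address space when alloc_vmap_area() runs out.  The callback is
 * handed the "freed" counter from the overflow path above and is expected
 * to add the number of pages it released.  The callback and helper names
 * below are made up for the example.
 */
#if 0
static int example_vmap_purge(struct notifier_block *nb, unsigned long event,
			      void *ptr)
{
	unsigned long *freed = ptr;

	/* drop whatever vmap-backed caches we can and account for them */
	*freed += example_shrink_my_vmap_cache();	/* hypothetical helper */
	return NOTIFY_OK;
}

static struct notifier_block example_vmap_nb = {
	.notifier_call = example_vmap_purge,
};

static int __init example_init(void)
{
	return register_vmap_purge_notifier(&example_vmap_nb);
}
#endif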

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
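
/*
 * Worked example (illustrative, assuming 4 KiB pages): with 16 online CPUs,
 * fls(16) == 5, so lazy_max_pages() returns 5 * (32 MiB / 4 KiB) == 40960
 * pages, i.e. roughly 160 MiB of lazily-freed virtual address space may
 * accumulate before a purge and global TLB flush is attempted.  A single
 * CPU gives 32 MiB, while 1024 CPUs give only 11 * 32 MiB, which is the
 * effect of the log scale described above.
 */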

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Serialize vmap purging.  There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	struct llist_node *valist;
	struct vmap_area *va;
	struct vmap_area *n_va;
	int resched_threshold;

	lockdep_assert_held(&vmap_purge_lock);

	valist = llist_del_all(&vmap_purge_list);
	if (unlikely(valist == NULL))
		return false;

	/*
	 * TODO: calculate the flush range without looping.
	 * The list can be up to lazy_max_pages() elements.
	 */
	llist_for_each_entry(va, valist, purge_list) {
		if (va->va_start < start)
			start = va->va_start;
		if (va->va_end > end)
			end = va->va_end;
	}

	flush_tlb_kernel_range(start, end);
	resched_threshold = (int) lazy_max_pages() << 1;

	spin_lock(&vmap_area_lock);
	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
		int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;

		__free_vmap_area(va);
		atomic_sub(nr, &vmap_lazy_nr);

		if (atomic_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&vmap_area_lock);
	}
	spin_unlock(&vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	int nr_lazy;

	nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
				    &vmap_lazy_nr);

	/* After this point, we may free va at any time */
	llist_add(&va->purge_list, &vmap_purge_list);

	if (unlikely(nr_lazy > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	unmap_vmap_area(va);
	if (debug_pagealloc_enabled())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
Guess 776db64fe02SNick Piggin * instead (we just need a rough idea) 777db64fe02SNick Piggin */ 778db64fe02SNick Piggin #if BITS_PER_LONG == 32 779db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024) 780db64fe02SNick Piggin #else 781db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024*1024) 782db64fe02SNick Piggin #endif 783db64fe02SNick Piggin 784db64fe02SNick Piggin #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 785db64fe02SNick Piggin #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 786db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 787db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 788db64fe02SNick Piggin #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 789db64fe02SNick Piggin #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 790f982f915SClemens Ladisch #define VMAP_BBMAP_BITS \ 791f982f915SClemens Ladisch VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 792db64fe02SNick Piggin VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 793f982f915SClemens Ladisch VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 794db64fe02SNick Piggin 795db64fe02SNick Piggin #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 796db64fe02SNick Piggin 7979b463334SJeremy Fitzhardinge static bool vmap_initialized __read_mostly = false; 7989b463334SJeremy Fitzhardinge 799db64fe02SNick Piggin struct vmap_block_queue { 800db64fe02SNick Piggin spinlock_t lock; 801db64fe02SNick Piggin struct list_head free; 802db64fe02SNick Piggin }; 803db64fe02SNick Piggin 804db64fe02SNick Piggin struct vmap_block { 805db64fe02SNick Piggin spinlock_t lock; 806db64fe02SNick Piggin struct vmap_area *va; 807db64fe02SNick Piggin unsigned long free, dirty; 8087d61bfe8SRoman Pen unsigned long dirty_min, dirty_max; /*< dirty range */ 809db64fe02SNick Piggin struct list_head free_list; 810db64fe02SNick Piggin struct rcu_head rcu_head; 81102b709dfSNick Piggin struct list_head purge; 812db64fe02SNick Piggin }; 813db64fe02SNick Piggin 814db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 815db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 816db64fe02SNick Piggin 817db64fe02SNick Piggin /* 818db64fe02SNick Piggin * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block 819db64fe02SNick Piggin * in the free path. Could get rid of this if we change the API to return a 820db64fe02SNick Piggin * "cookie" from alloc, to be passed to free. But no big deal yet. 821db64fe02SNick Piggin */ 822db64fe02SNick Piggin static DEFINE_SPINLOCK(vmap_block_tree_lock); 823db64fe02SNick Piggin static RADIX_TREE(vmap_block_tree, GFP_ATOMIC); 824db64fe02SNick Piggin 825db64fe02SNick Piggin /* 826db64fe02SNick Piggin * We should probably have a fallback mechanism to allocate virtual memory 827db64fe02SNick Piggin * out of partially filled vmap blocks. However vmap block sizing should be 828db64fe02SNick Piggin * fairly reasonable according to the vmalloc size, so it shouldn't be a 829db64fe02SNick Piggin * big problem. 

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}
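
/*
 * Worked example (illustrative, using the 4 MiB VMAP_BLOCK_SIZE computed
 * above and assuming VMALLOC_START is VMAP_BLOCK_SIZE-aligned): a block whose
 * vmap_area starts at VMALLOC_START + 12 MiB gets index 3 from
 * addr_to_vb_idx(), and every address inside that 4 MiB block maps to the
 * same index, which is what the radix tree above is keyed on.
 * vmap_block_vaddr(va_start, 5) then returns va_start + 5 * PAGE_SIZE and
 * BUG()s if the page offset would leave the block.
 */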

/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
	offset >>= PAGE_SHIFT;

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	if (debug_pagealloc_enabled())
		flush_tlb_kernel_range((unsigned long)addr,
					(unsigned long)addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
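
/*
 * Worked example (illustrative): freeing a 2-page allocation at page offset
 * 10 inside a block drives dirty_min down to at most 10 and dirty_max up to
 * at least 12; a later free at offset 100 only widens dirty_max.
 * _vm_unmap_aliases() below turns each block's [dirty_min, dirty_max) into a
 * single [s, e) virtual range and folds it into the global range handed to
 * flush_tlb_kernel_range(), so one TLB flush covers all the lazily unmapped
 * sub-allocations.
 */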

static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
	int cpu;

	if (unlikely(!vmap_initialized))
		return;

	might_sleep();

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush = 0;

	_vm_unmap_aliases(start, end, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;
	struct vmap_area *va;

	might_sleep();
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	if (likely(count <= VMAP_MAX_ALLOC)) {
		debug_check_no_locks_freed(mem, size);
		vb_free(mem, size);
		return;
	}

	va = find_vmap_area(addr);
	BUG_ON(!va);
	debug_check_no_locks_freed((void *)va->va_start,
				    (va->va_end - va->va_start));
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap(), so it's a good choice.  But if you mix long-life and
 * short-life objects with vm_map_ram(), it could consume lots of address
 * space through fragmentation (especially on a 32bit machine). You could
 * eventually see failures. Please use this function for short-lived objects.
117436437638SGioh Kim * 1175e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure 1176db64fe02SNick Piggin */ 1177db64fe02SNick Piggin void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) 1178db64fe02SNick Piggin { 117965ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 1180db64fe02SNick Piggin unsigned long addr; 1181db64fe02SNick Piggin void *mem; 1182db64fe02SNick Piggin 1183db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) { 1184db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL); 1185db64fe02SNick Piggin if (IS_ERR(mem)) 1186db64fe02SNick Piggin return NULL; 1187db64fe02SNick Piggin addr = (unsigned long)mem; 1188db64fe02SNick Piggin } else { 1189db64fe02SNick Piggin struct vmap_area *va; 1190db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE, 1191db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); 1192db64fe02SNick Piggin if (IS_ERR(va)) 1193db64fe02SNick Piggin return NULL; 1194db64fe02SNick Piggin 1195db64fe02SNick Piggin addr = va->va_start; 1196db64fe02SNick Piggin mem = (void *)addr; 1197db64fe02SNick Piggin } 1198db64fe02SNick Piggin if (vmap_page_range(addr, addr + size, prot, pages) < 0) { 1199db64fe02SNick Piggin vm_unmap_ram(mem, count); 1200db64fe02SNick Piggin return NULL; 1201db64fe02SNick Piggin } 1202db64fe02SNick Piggin return mem; 1203db64fe02SNick Piggin } 1204db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram); 1205db64fe02SNick Piggin 12064341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata; 120792eac168SMike Rapoport 1208f0aa6617STejun Heo /** 1209be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot 1210be9b7335SNicolas Pitre * @vm: vm_struct to add 1211be9b7335SNicolas Pitre * 1212be9b7335SNicolas Pitre * This function is used to add fixed kernel vm area to vmlist before 1213be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 1214be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero. 1215be9b7335SNicolas Pitre * 1216be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1217be9b7335SNicolas Pitre */ 1218be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm) 1219be9b7335SNicolas Pitre { 1220be9b7335SNicolas Pitre struct vm_struct *tmp, **p; 1221be9b7335SNicolas Pitre 1222be9b7335SNicolas Pitre BUG_ON(vmap_initialized); 1223be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 1224be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) { 1225be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size); 1226be9b7335SNicolas Pitre break; 1227be9b7335SNicolas Pitre } else 1228be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr); 1229be9b7335SNicolas Pitre } 1230be9b7335SNicolas Pitre vm->next = *p; 1231be9b7335SNicolas Pitre *p = vm; 1232be9b7335SNicolas Pitre } 1233be9b7335SNicolas Pitre 1234be9b7335SNicolas Pitre /** 1235f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot 1236f0aa6617STejun Heo * @vm: vm_struct to register 1237c0c0a293STejun Heo * @align: requested alignment 1238f0aa6617STejun Heo * 1239f0aa6617STejun Heo * This function is used to register kernel vm area before 1240f0aa6617STejun Heo * vmalloc_init() is called. @vm->size and @vm->flags should contain 1241f0aa6617STejun Heo * proper values on entry and other fields should be zero. 
On return, 1242f0aa6617STejun Heo * vm->addr contains the allocated address. 1243f0aa6617STejun Heo * 1244f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1245f0aa6617STejun Heo */ 1246c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align) 1247f0aa6617STejun Heo { 1248f0aa6617STejun Heo static size_t vm_init_off __initdata; 1249c0c0a293STejun Heo unsigned long addr; 1250f0aa6617STejun Heo 1251c0c0a293STejun Heo addr = ALIGN(VMALLOC_START + vm_init_off, align); 1252c0c0a293STejun Heo vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; 1253c0c0a293STejun Heo 1254c0c0a293STejun Heo vm->addr = (void *)addr; 1255f0aa6617STejun Heo 1256be9b7335SNicolas Pitre vm_area_add_early(vm); 1257f0aa6617STejun Heo } 1258f0aa6617STejun Heo 1259db64fe02SNick Piggin void __init vmalloc_init(void) 1260db64fe02SNick Piggin { 1261822c18f2SIvan Kokshaysky struct vmap_area *va; 1262822c18f2SIvan Kokshaysky struct vm_struct *tmp; 1263db64fe02SNick Piggin int i; 1264db64fe02SNick Piggin 1265db64fe02SNick Piggin for_each_possible_cpu(i) { 1266db64fe02SNick Piggin struct vmap_block_queue *vbq; 126732fcfd40SAl Viro struct vfree_deferred *p; 1268db64fe02SNick Piggin 1269db64fe02SNick Piggin vbq = &per_cpu(vmap_block_queue, i); 1270db64fe02SNick Piggin spin_lock_init(&vbq->lock); 1271db64fe02SNick Piggin INIT_LIST_HEAD(&vbq->free); 127232fcfd40SAl Viro p = &per_cpu(vfree_deferred, i); 127332fcfd40SAl Viro init_llist_head(&p->list); 127432fcfd40SAl Viro INIT_WORK(&p->wq, free_work); 1275db64fe02SNick Piggin } 12769b463334SJeremy Fitzhardinge 1277822c18f2SIvan Kokshaysky /* Import existing vmlist entries. */ 1278822c18f2SIvan Kokshaysky for (tmp = vmlist; tmp; tmp = tmp->next) { 127943ebdac4SPekka Enberg va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT); 1280dbda591dSKyongHo va->flags = VM_VM_AREA; 1281822c18f2SIvan Kokshaysky va->va_start = (unsigned long)tmp->addr; 1282822c18f2SIvan Kokshaysky va->va_end = va->va_start + tmp->size; 1283dbda591dSKyongHo va->vm = tmp; 1284822c18f2SIvan Kokshaysky __insert_vmap_area(va); 1285822c18f2SIvan Kokshaysky } 1286ca23e405STejun Heo 1287ca23e405STejun Heo vmap_area_pcpu_hole = VMALLOC_END; 1288ca23e405STejun Heo 12899b463334SJeremy Fitzhardinge vmap_initialized = true; 1290db64fe02SNick Piggin } 1291db64fe02SNick Piggin 12928fc48985STejun Heo /** 12938fc48985STejun Heo * map_kernel_range_noflush - map kernel VM area with the specified pages 12948fc48985STejun Heo * @addr: start of the VM area to map 12958fc48985STejun Heo * @size: size of the VM area to map 12968fc48985STejun Heo * @prot: page protection flags to use 12978fc48985STejun Heo * @pages: pages to map 12988fc48985STejun Heo * 12998fc48985STejun Heo * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size 13008fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 13018fc48985STejun Heo * friends. 13028fc48985STejun Heo * 13038fc48985STejun Heo * NOTE: 13048fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 13058fc48985STejun Heo * responsible for calling flush_cache_vmap() on to-be-mapped areas 13068fc48985STejun Heo * before calling this function. 13078fc48985STejun Heo * 13088fc48985STejun Heo * RETURNS: 13098fc48985STejun Heo * The number of pages mapped on success, -errno on failure. 
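 *
 * A minimal usage sketch (illustrative only; error handling omitted).
 * Cache maintenance remains the caller's job, as per the NOTE above:
 *
 *	struct vm_struct *area = get_vm_area(size, VM_MAP);
 *
 *	map_kernel_range_noflush((unsigned long)area->addr, size,
 *				 PAGE_KERNEL, pages);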
13108fc48985STejun Heo */ 13118fc48985STejun Heo int map_kernel_range_noflush(unsigned long addr, unsigned long size, 13128fc48985STejun Heo pgprot_t prot, struct page **pages) 13138fc48985STejun Heo { 13148fc48985STejun Heo return vmap_page_range_noflush(addr, addr + size, prot, pages); 13158fc48985STejun Heo } 13168fc48985STejun Heo 13178fc48985STejun Heo /** 13188fc48985STejun Heo * unmap_kernel_range_noflush - unmap kernel VM area 13198fc48985STejun Heo * @addr: start of the VM area to unmap 13208fc48985STejun Heo * @size: size of the VM area to unmap 13218fc48985STejun Heo * 13228fc48985STejun Heo * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size 13238fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 13248fc48985STejun Heo * friends. 13258fc48985STejun Heo * 13268fc48985STejun Heo * NOTE: 13278fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 13288fc48985STejun Heo * responsible for calling flush_cache_vunmap() on to-be-mapped areas 13298fc48985STejun Heo * before calling this function and flush_tlb_kernel_range() after. 13308fc48985STejun Heo */ 13318fc48985STejun Heo void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) 13328fc48985STejun Heo { 13338fc48985STejun Heo vunmap_page_range(addr, addr + size); 13348fc48985STejun Heo } 133581e88fdcSHuang Ying EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush); 13368fc48985STejun Heo 13378fc48985STejun Heo /** 13388fc48985STejun Heo * unmap_kernel_range - unmap kernel VM area and flush cache and TLB 13398fc48985STejun Heo * @addr: start of the VM area to unmap 13408fc48985STejun Heo * @size: size of the VM area to unmap 13418fc48985STejun Heo * 13428fc48985STejun Heo * Similar to unmap_kernel_range_noflush() but flushes vcache before 13438fc48985STejun Heo * the unmapping and tlb after. 13448fc48985STejun Heo */ 1345db64fe02SNick Piggin void unmap_kernel_range(unsigned long addr, unsigned long size) 1346db64fe02SNick Piggin { 1347db64fe02SNick Piggin unsigned long end = addr + size; 1348f6fcba70STejun Heo 1349f6fcba70STejun Heo flush_cache_vunmap(addr, end); 1350db64fe02SNick Piggin vunmap_page_range(addr, end); 1351db64fe02SNick Piggin flush_tlb_kernel_range(addr, end); 1352db64fe02SNick Piggin } 135393ef6d6cSMinchan Kim EXPORT_SYMBOL_GPL(unmap_kernel_range); 1354db64fe02SNick Piggin 1355f6f8ed47SWANG Chao int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) 1356db64fe02SNick Piggin { 1357db64fe02SNick Piggin unsigned long addr = (unsigned long)area->addr; 1358762216abSWanpeng Li unsigned long end = addr + get_vm_area_size(area); 1359db64fe02SNick Piggin int err; 1360db64fe02SNick Piggin 1361f6f8ed47SWANG Chao err = vmap_page_range(addr, end, prot, pages); 1362db64fe02SNick Piggin 1363f6f8ed47SWANG Chao return err > 0 ? 
0 : err; 1364db64fe02SNick Piggin } 1365db64fe02SNick Piggin EXPORT_SYMBOL_GPL(map_vm_area); 1366db64fe02SNick Piggin 1367f5252e00SMitsuo Hayasaka static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 13685e6cafc8SMarek Szyprowski unsigned long flags, const void *caller) 1369cf88c790STejun Heo { 1370c69480adSJoonsoo Kim spin_lock(&vmap_area_lock); 1371cf88c790STejun Heo vm->flags = flags; 1372cf88c790STejun Heo vm->addr = (void *)va->va_start; 1373cf88c790STejun Heo vm->size = va->va_end - va->va_start; 1374cf88c790STejun Heo vm->caller = caller; 1375db1aecafSMinchan Kim va->vm = vm; 1376cf88c790STejun Heo va->flags |= VM_VM_AREA; 1377c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 1378f5252e00SMitsuo Hayasaka } 1379cf88c790STejun Heo 138020fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm) 1381f5252e00SMitsuo Hayasaka { 1382d4033afdSJoonsoo Kim /* 138320fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED, 1384d4033afdSJoonsoo Kim * we should make sure that vm has proper values. 1385d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info(). 1386d4033afdSJoonsoo Kim */ 1387d4033afdSJoonsoo Kim smp_wmb(); 138820fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED; 1389cf88c790STejun Heo } 1390cf88c790STejun Heo 1391db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size, 13922dca6999SDavid Miller unsigned long align, unsigned long flags, unsigned long start, 13935e6cafc8SMarek Szyprowski unsigned long end, int node, gfp_t gfp_mask, const void *caller) 1394db64fe02SNick Piggin { 13950006526dSKautuk Consul struct vmap_area *va; 1396db64fe02SNick Piggin struct vm_struct *area; 13971da177e4SLinus Torvalds 139852fd24caSGiridhar Pemmasani BUG_ON(in_interrupt()); 13991da177e4SLinus Torvalds size = PAGE_ALIGN(size); 140031be8309SOGAWA Hirofumi if (unlikely(!size)) 140131be8309SOGAWA Hirofumi return NULL; 14021da177e4SLinus Torvalds 1403252e5c6eSzijun_hu if (flags & VM_IOREMAP) 1404252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size), 1405252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER); 1406252e5c6eSzijun_hu 1407cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 14081da177e4SLinus Torvalds if (unlikely(!area)) 14091da177e4SLinus Torvalds return NULL; 14101da177e4SLinus Torvalds 141171394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD)) 14121da177e4SLinus Torvalds size += PAGE_SIZE; 14131da177e4SLinus Torvalds 1414db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask); 1415db64fe02SNick Piggin if (IS_ERR(va)) { 1416db64fe02SNick Piggin kfree(area); 1417db64fe02SNick Piggin return NULL; 14181da177e4SLinus Torvalds } 14191da177e4SLinus Torvalds 1420f5252e00SMitsuo Hayasaka setup_vmalloc_vm(area, va, flags, caller); 1421f5252e00SMitsuo Hayasaka 14221da177e4SLinus Torvalds return area; 14231da177e4SLinus Torvalds } 14241da177e4SLinus Torvalds 1425930fc45aSChristoph Lameter struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 1426930fc45aSChristoph Lameter unsigned long start, unsigned long end) 1427930fc45aSChristoph Lameter { 142800ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, 142900ef2d2fSDavid Rientjes GFP_KERNEL, __builtin_return_address(0)); 1430930fc45aSChristoph Lameter } 14315992b6daSRusty Russell EXPORT_SYMBOL_GPL(__get_vm_area); 1432930fc45aSChristoph Lameter 1433c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned 
long flags, 1434c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end, 14355e6cafc8SMarek Szyprowski const void *caller) 1436c2968612SBenjamin Herrenschmidt { 143700ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE, 143800ef2d2fSDavid Rientjes GFP_KERNEL, caller); 1439c2968612SBenjamin Herrenschmidt } 1440c2968612SBenjamin Herrenschmidt 14411da177e4SLinus Torvalds /** 1442183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area 14431da177e4SLinus Torvalds * @size: size of the area 14441da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 14451da177e4SLinus Torvalds * 14461da177e4SLinus Torvalds * Search an area of @size in the kernel virtual mapping area, 14471da177e4SLinus Torvalds * and reserved it for out purposes. Returns the area descriptor 14481da177e4SLinus Torvalds * on success or %NULL on failure. 1449a862f68aSMike Rapoport * 1450a862f68aSMike Rapoport * Return: the area descriptor on success or %NULL on failure. 14511da177e4SLinus Torvalds */ 14521da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 14531da177e4SLinus Torvalds { 14542dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, 145500ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, 145600ef2d2fSDavid Rientjes __builtin_return_address(0)); 145723016969SChristoph Lameter } 145823016969SChristoph Lameter 145923016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 14605e6cafc8SMarek Szyprowski const void *caller) 146123016969SChristoph Lameter { 14622dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, 146300ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, caller); 14641da177e4SLinus Torvalds } 14651da177e4SLinus Torvalds 1466e9da6e99SMarek Szyprowski /** 1467e9da6e99SMarek Szyprowski * find_vm_area - find a continuous kernel virtual area 1468e9da6e99SMarek Szyprowski * @addr: base address 1469e9da6e99SMarek Szyprowski * 1470e9da6e99SMarek Szyprowski * Search for the kernel VM area starting at @addr, and return it. 1471e9da6e99SMarek Szyprowski * It is up to the caller to do all required locking to keep the returned 1472e9da6e99SMarek Szyprowski * pointer valid. 1473a862f68aSMike Rapoport * 1474a862f68aSMike Rapoport * Return: pointer to the found area or %NULL on faulure 1475e9da6e99SMarek Szyprowski */ 1476e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr) 147783342314SNick Piggin { 1478db64fe02SNick Piggin struct vmap_area *va; 147983342314SNick Piggin 1480db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 1481db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) 1482db1aecafSMinchan Kim return va->vm; 148383342314SNick Piggin 14847856dfebSAndi Kleen return NULL; 14857856dfebSAndi Kleen } 14867856dfebSAndi Kleen 14871da177e4SLinus Torvalds /** 1488183ff22bSSimon Arlott * remove_vm_area - find and remove a continuous kernel virtual area 14891da177e4SLinus Torvalds * @addr: base address 14901da177e4SLinus Torvalds * 14911da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it. 14921da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe 14937856dfebSAndi Kleen * on SMP machines, except for its size or flags. 
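 *
 * A minimal usage sketch (illustrative only): releasing a range that was
 * reserved with get_vm_area() but never handed to vfree() or vunmap().
 * Note that remove_vm_area() does not free the vm_struct itself:
 *
 *	struct vm_struct *vm = remove_vm_area(area->addr);
 *
 *	kfree(vm);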
1494a862f68aSMike Rapoport * 1495a862f68aSMike Rapoport * Return: pointer to the found area or %NULL on faulure 14961da177e4SLinus Torvalds */ 1497b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 14981da177e4SLinus Torvalds { 1499db64fe02SNick Piggin struct vmap_area *va; 1500db64fe02SNick Piggin 15015803ed29SChristoph Hellwig might_sleep(); 15025803ed29SChristoph Hellwig 1503db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 1504db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) { 1505db1aecafSMinchan Kim struct vm_struct *vm = va->vm; 1506f5252e00SMitsuo Hayasaka 1507c69480adSJoonsoo Kim spin_lock(&vmap_area_lock); 1508c69480adSJoonsoo Kim va->vm = NULL; 1509c69480adSJoonsoo Kim va->flags &= ~VM_VM_AREA; 151078c72746SYisheng Xie va->flags |= VM_LAZY_FREE; 1511c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock); 1512c69480adSJoonsoo Kim 1513a5af5aa8SAndrey Ryabinin kasan_free_shadow(vm); 1514dd32c279SKAMEZAWA Hiroyuki free_unmap_vmap_area(va); 1515dd32c279SKAMEZAWA Hiroyuki 1516db64fe02SNick Piggin return vm; 1517db64fe02SNick Piggin } 1518db64fe02SNick Piggin return NULL; 15191da177e4SLinus Torvalds } 15201da177e4SLinus Torvalds 1521868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area, 1522868b104dSRick Edgecombe int (*set_direct_map)(struct page *page)) 1523868b104dSRick Edgecombe { 1524868b104dSRick Edgecombe int i; 1525868b104dSRick Edgecombe 1526868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) 1527868b104dSRick Edgecombe if (page_address(area->pages[i])) 1528868b104dSRick Edgecombe set_direct_map(area->pages[i]); 1529868b104dSRick Edgecombe } 1530868b104dSRick Edgecombe 1531868b104dSRick Edgecombe /* Handle removing and resetting vm mappings related to the vm_struct. */ 1532868b104dSRick Edgecombe static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 1533868b104dSRick Edgecombe { 1534868b104dSRick Edgecombe unsigned long addr = (unsigned long)area->addr; 1535868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 1536868b104dSRick Edgecombe int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 1537868b104dSRick Edgecombe int i; 1538868b104dSRick Edgecombe 1539868b104dSRick Edgecombe /* 1540868b104dSRick Edgecombe * The below block can be removed when all architectures that have 1541868b104dSRick Edgecombe * direct map permissions also have set_direct_map_() implementations. 1542868b104dSRick Edgecombe * This is concerned with resetting the direct map any an vm alias with 1543868b104dSRick Edgecombe * execute permissions, without leaving a RW+X window. 1544868b104dSRick Edgecombe */ 1545868b104dSRick Edgecombe if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) { 1546868b104dSRick Edgecombe set_memory_nx(addr, area->nr_pages); 1547868b104dSRick Edgecombe set_memory_rw(addr, area->nr_pages); 1548868b104dSRick Edgecombe } 1549868b104dSRick Edgecombe 1550868b104dSRick Edgecombe remove_vm_area(area->addr); 1551868b104dSRick Edgecombe 1552868b104dSRick Edgecombe /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */ 1553868b104dSRick Edgecombe if (!flush_reset) 1554868b104dSRick Edgecombe return; 1555868b104dSRick Edgecombe 1556868b104dSRick Edgecombe /* 1557868b104dSRick Edgecombe * If not deallocating pages, just do the flush of the VM area and 1558868b104dSRick Edgecombe * return. 
1559868b104dSRick Edgecombe */ 1560868b104dSRick Edgecombe if (!deallocate_pages) { 1561868b104dSRick Edgecombe vm_unmap_aliases(); 1562868b104dSRick Edgecombe return; 1563868b104dSRick Edgecombe } 1564868b104dSRick Edgecombe 1565868b104dSRick Edgecombe /* 1566868b104dSRick Edgecombe * If execution gets here, flush the vm mapping and reset the direct 1567868b104dSRick Edgecombe * map. Find the start and end range of the direct mappings to make sure 1568868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map. 1569868b104dSRick Edgecombe */ 1570868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) { 1571868b104dSRick Edgecombe if (page_address(area->pages[i])) { 1572868b104dSRick Edgecombe start = min(addr, start); 1573868b104dSRick Edgecombe end = max(addr, end); 1574868b104dSRick Edgecombe } 1575868b104dSRick Edgecombe } 1576868b104dSRick Edgecombe 1577868b104dSRick Edgecombe /* 1578868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if 1579868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and 1580868b104dSRick Edgecombe * reset the direct map permissions to the default. 1581868b104dSRick Edgecombe */ 1582868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush); 1583868b104dSRick Edgecombe _vm_unmap_aliases(start, end, 1); 1584868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush); 1585868b104dSRick Edgecombe } 1586868b104dSRick Edgecombe 1587b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 15881da177e4SLinus Torvalds { 15891da177e4SLinus Torvalds struct vm_struct *area; 15901da177e4SLinus Torvalds 15911da177e4SLinus Torvalds if (!addr) 15921da177e4SLinus Torvalds return; 15931da177e4SLinus Torvalds 1594e69e9d4aSHATAYAMA Daisuke if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 1595ab15d9b4SDan Carpenter addr)) 15961da177e4SLinus Torvalds return; 15971da177e4SLinus Torvalds 15986ade2032SLiviu Dudau area = find_vm_area(addr); 15991da177e4SLinus Torvalds if (unlikely(!area)) { 16004c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 16011da177e4SLinus Torvalds addr); 16021da177e4SLinus Torvalds return; 16031da177e4SLinus Torvalds } 16041da177e4SLinus Torvalds 160505e3ff95SChintan Pandya debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); 160605e3ff95SChintan Pandya debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); 16079a11b49aSIngo Molnar 1608868b104dSRick Edgecombe vm_remove_mappings(area, deallocate_pages); 1609868b104dSRick Edgecombe 16101da177e4SLinus Torvalds if (deallocate_pages) { 16111da177e4SLinus Torvalds int i; 16121da177e4SLinus Torvalds 16131da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1614bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 1615bf53d6f8SChristoph Lameter 1616bf53d6f8SChristoph Lameter BUG_ON(!page); 16174949148aSVladimir Davydov __free_pages(page, 0); 16181da177e4SLinus Torvalds } 16191da177e4SLinus Torvalds 1620244d63eeSDavid Rientjes kvfree(area->pages); 16211da177e4SLinus Torvalds } 16221da177e4SLinus Torvalds 16231da177e4SLinus Torvalds kfree(area); 16241da177e4SLinus Torvalds return; 16251da177e4SLinus Torvalds } 16261da177e4SLinus Torvalds 1627bf22e37aSAndrey Ryabinin static inline void __vfree_deferred(const void *addr) 1628bf22e37aSAndrey Ryabinin { 1629bf22e37aSAndrey Ryabinin /* 1630bf22e37aSAndrey Ryabinin * Use raw_cpu_ptr() because this can be 
called from preemptible 1631bf22e37aSAndrey Ryabinin * context. Preemption is absolutely fine here, because the llist_add() 1632bf22e37aSAndrey Ryabinin * implementation is lockless, so it works even if we are adding to 1633bf22e37aSAndrey Ryabinin * nother cpu's list. schedule_work() should be fine with this too. 1634bf22e37aSAndrey Ryabinin */ 1635bf22e37aSAndrey Ryabinin struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 1636bf22e37aSAndrey Ryabinin 1637bf22e37aSAndrey Ryabinin if (llist_add((struct llist_node *)addr, &p->list)) 1638bf22e37aSAndrey Ryabinin schedule_work(&p->wq); 1639bf22e37aSAndrey Ryabinin } 1640bf22e37aSAndrey Ryabinin 1641bf22e37aSAndrey Ryabinin /** 1642bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc() 1643bf22e37aSAndrey Ryabinin * @addr: memory base address 1644bf22e37aSAndrey Ryabinin * 1645bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context 1646bf22e37aSAndrey Ryabinin * except NMIs. 1647bf22e37aSAndrey Ryabinin */ 1648bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr) 1649bf22e37aSAndrey Ryabinin { 1650bf22e37aSAndrey Ryabinin BUG_ON(in_nmi()); 1651bf22e37aSAndrey Ryabinin 1652bf22e37aSAndrey Ryabinin kmemleak_free(addr); 1653bf22e37aSAndrey Ryabinin 1654bf22e37aSAndrey Ryabinin if (!addr) 1655bf22e37aSAndrey Ryabinin return; 1656bf22e37aSAndrey Ryabinin __vfree_deferred(addr); 1657bf22e37aSAndrey Ryabinin } 1658bf22e37aSAndrey Ryabinin 1659c67dc624SRoman Penyaev static void __vfree(const void *addr) 1660c67dc624SRoman Penyaev { 1661c67dc624SRoman Penyaev if (unlikely(in_interrupt())) 1662c67dc624SRoman Penyaev __vfree_deferred(addr); 1663c67dc624SRoman Penyaev else 1664c67dc624SRoman Penyaev __vunmap(addr, 1); 1665c67dc624SRoman Penyaev } 1666c67dc624SRoman Penyaev 16671da177e4SLinus Torvalds /** 16681da177e4SLinus Torvalds * vfree - release memory allocated by vmalloc() 16691da177e4SLinus Torvalds * @addr: memory base address 16701da177e4SLinus Torvalds * 1671183ff22bSSimon Arlott * Free the virtually continuous memory area starting at @addr, as 167280e93effSPekka Enberg * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is 167380e93effSPekka Enberg * NULL, no operation is performed. 16741da177e4SLinus Torvalds * 167532fcfd40SAl Viro * Must not be called in NMI context (strictly speaking, only if we don't 167632fcfd40SAl Viro * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 167732fcfd40SAl Viro * conventions for vfree() arch-depenedent would be a really bad idea) 167832fcfd40SAl Viro * 16793ca4ea3aSAndrey Ryabinin * May sleep if called *not* from interrupt context. 
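 *
 * A minimal usage sketch (illustrative only), from process context where
 * sleeping is allowed:
 *
 *	void *buf = vmalloc(len);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	vfree(buf);
 *
 * From a context that cannot sleep (but is not NMI), use vfree_atomic()
 * instead, which defers the actual unmap to a workqueue.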
16803ca4ea3aSAndrey Ryabinin * 16810e056eb5Smchehab@s-opensource.com * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node) 16821da177e4SLinus Torvalds */ 1683b3bdda02SChristoph Lameter void vfree(const void *addr) 16841da177e4SLinus Torvalds { 168532fcfd40SAl Viro BUG_ON(in_nmi()); 168689219d37SCatalin Marinas 168789219d37SCatalin Marinas kmemleak_free(addr); 168889219d37SCatalin Marinas 1689a8dda165SAndrey Ryabinin might_sleep_if(!in_interrupt()); 1690a8dda165SAndrey Ryabinin 169132fcfd40SAl Viro if (!addr) 169232fcfd40SAl Viro return; 1693c67dc624SRoman Penyaev 1694c67dc624SRoman Penyaev __vfree(addr); 16951da177e4SLinus Torvalds } 16961da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 16971da177e4SLinus Torvalds 16981da177e4SLinus Torvalds /** 16991da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 17001da177e4SLinus Torvalds * @addr: memory base address 17011da177e4SLinus Torvalds * 17021da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 17031da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 17041da177e4SLinus Torvalds * 170580e93effSPekka Enberg * Must not be called in interrupt context. 17061da177e4SLinus Torvalds */ 1707b3bdda02SChristoph Lameter void vunmap(const void *addr) 17081da177e4SLinus Torvalds { 17091da177e4SLinus Torvalds BUG_ON(in_interrupt()); 171034754b69SPeter Zijlstra might_sleep(); 171132fcfd40SAl Viro if (addr) 17121da177e4SLinus Torvalds __vunmap(addr, 0); 17131da177e4SLinus Torvalds } 17141da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 17151da177e4SLinus Torvalds 17161da177e4SLinus Torvalds /** 17171da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 17181da177e4SLinus Torvalds * @pages: array of page pointers 17191da177e4SLinus Torvalds * @count: number of pages to map 17201da177e4SLinus Torvalds * @flags: vm_area->flags 17211da177e4SLinus Torvalds * @prot: page protection for the mapping 17221da177e4SLinus Torvalds * 17231da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 17241da177e4SLinus Torvalds * space. 
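 *
 * A minimal usage sketch (illustrative only; error handling omitted).
 * vunmap() removes only the mapping, so the pages must still be freed:
 *
 *	for (i = 0; i < count; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	vaddr = vmap(pages, count, VM_MAP, PAGE_KERNEL);
 *	vunmap(vaddr);
 *	for (i = 0; i < count; i++)
 *		__free_page(pages[i]);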
1725a862f68aSMike Rapoport * 1726a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 17271da177e4SLinus Torvalds */ 17281da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 17291da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 17301da177e4SLinus Torvalds { 17311da177e4SLinus Torvalds struct vm_struct *area; 173265ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */ 17331da177e4SLinus Torvalds 173434754b69SPeter Zijlstra might_sleep(); 173534754b69SPeter Zijlstra 1736ca79b0c2SArun KS if (count > totalram_pages()) 17371da177e4SLinus Torvalds return NULL; 17381da177e4SLinus Torvalds 173965ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT; 174065ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 17411da177e4SLinus Torvalds if (!area) 17421da177e4SLinus Torvalds return NULL; 174323016969SChristoph Lameter 1744f6f8ed47SWANG Chao if (map_vm_area(area, prot, pages)) { 17451da177e4SLinus Torvalds vunmap(area->addr); 17461da177e4SLinus Torvalds return NULL; 17471da177e4SLinus Torvalds } 17481da177e4SLinus Torvalds 17491da177e4SLinus Torvalds return area->addr; 17501da177e4SLinus Torvalds } 17511da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 17521da177e4SLinus Torvalds 17538594a21cSMichal Hocko static void *__vmalloc_node(unsigned long size, unsigned long align, 17548594a21cSMichal Hocko gfp_t gfp_mask, pgprot_t prot, 17558594a21cSMichal Hocko int node, const void *caller); 1756e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 17573722e13cSWanpeng Li pgprot_t prot, int node) 17581da177e4SLinus Torvalds { 17591da177e4SLinus Torvalds struct page **pages; 17601da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 1761930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 1762704b862fSLaura Abbott const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; 1763704b862fSLaura Abbott const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ? 1764704b862fSLaura Abbott 0 : 1765704b862fSLaura Abbott __GFP_HIGHMEM; 17661da177e4SLinus Torvalds 1767762216abSWanpeng Li nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; 17681da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 17691da177e4SLinus Torvalds 17701da177e4SLinus Torvalds area->nr_pages = nr_pages; 17711da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
*/ 17728757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 1773704b862fSLaura Abbott pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask, 17743722e13cSWanpeng Li PAGE_KERNEL, node, area->caller); 1775286e1ea3SAndrew Morton } else { 1776976d6dfbSJan Beulich pages = kmalloc_node(array_size, nested_gfp, node); 1777286e1ea3SAndrew Morton } 17781da177e4SLinus Torvalds area->pages = pages; 17791da177e4SLinus Torvalds if (!area->pages) { 17801da177e4SLinus Torvalds remove_vm_area(area->addr); 17811da177e4SLinus Torvalds kfree(area); 17821da177e4SLinus Torvalds return NULL; 17831da177e4SLinus Torvalds } 17841da177e4SLinus Torvalds 17851da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 1786bf53d6f8SChristoph Lameter struct page *page; 1787bf53d6f8SChristoph Lameter 17884b90951cSJianguo Wu if (node == NUMA_NO_NODE) 1789704b862fSLaura Abbott page = alloc_page(alloc_mask|highmem_mask); 1790930fc45aSChristoph Lameter else 1791704b862fSLaura Abbott page = alloc_pages_node(node, alloc_mask|highmem_mask, 0); 1792bf53d6f8SChristoph Lameter 1793bf53d6f8SChristoph Lameter if (unlikely(!page)) { 17941da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 17951da177e4SLinus Torvalds area->nr_pages = i; 17961da177e4SLinus Torvalds goto fail; 17971da177e4SLinus Torvalds } 1798bf53d6f8SChristoph Lameter area->pages[i] = page; 1799704b862fSLaura Abbott if (gfpflags_allow_blocking(gfp_mask|highmem_mask)) 1800660654f9SEric Dumazet cond_resched(); 18011da177e4SLinus Torvalds } 18021da177e4SLinus Torvalds 1803f6f8ed47SWANG Chao if (map_vm_area(area, prot, pages)) 18041da177e4SLinus Torvalds goto fail; 18051da177e4SLinus Torvalds return area->addr; 18061da177e4SLinus Torvalds 18071da177e4SLinus Torvalds fail: 1808a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 18097877cdccSMichal Hocko "vmalloc: allocation failure, allocated %ld of %ld bytes", 181022943ab1SDave Hansen (area->nr_pages*PAGE_SIZE), area->size); 1811c67dc624SRoman Penyaev __vfree(area->addr); 18121da177e4SLinus Torvalds return NULL; 18131da177e4SLinus Torvalds } 18141da177e4SLinus Torvalds 1815d0a21265SDavid Rientjes /** 1816d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 1817d0a21265SDavid Rientjes * @size: allocation size 1818d0a21265SDavid Rientjes * @align: desired alignment 1819d0a21265SDavid Rientjes * @start: vm area range start 1820d0a21265SDavid Rientjes * @end: vm area range end 1821d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 1822d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 1823cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 182400ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 1825d0a21265SDavid Rientjes * @caller: caller's return address 1826d0a21265SDavid Rientjes * 1827d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 1828d0a21265SDavid Rientjes * allocator with @gfp_mask flags. Map them into contiguous 1829d0a21265SDavid Rientjes * kernel virtual space, using a pagetable protection of @prot. 
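 *
 * A minimal usage sketch (illustrative only): an arch module_alloc()-style
 * caller restricting the range and asking for an executable protection, on
 * an architecture that provides MODULES_VADDR/MODULES_END:
 *
 *	p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
 *				 GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
 *				 NUMA_NO_NODE, __builtin_return_address(0));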
1830a862f68aSMike Rapoport * 1831a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 1832d0a21265SDavid Rientjes */ 1833d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 1834d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 1835cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node, 1836cb9e3c29SAndrey Ryabinin const void *caller) 1837930fc45aSChristoph Lameter { 1838d0a21265SDavid Rientjes struct vm_struct *area; 1839d0a21265SDavid Rientjes void *addr; 1840d0a21265SDavid Rientjes unsigned long real_size = size; 1841d0a21265SDavid Rientjes 1842d0a21265SDavid Rientjes size = PAGE_ALIGN(size); 1843ca79b0c2SArun KS if (!size || (size >> PAGE_SHIFT) > totalram_pages()) 1844de7d2b56SJoe Perches goto fail; 1845d0a21265SDavid Rientjes 1846cb9e3c29SAndrey Ryabinin area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | 1847cb9e3c29SAndrey Ryabinin vm_flags, start, end, node, gfp_mask, caller); 1848d0a21265SDavid Rientjes if (!area) 1849de7d2b56SJoe Perches goto fail; 1850d0a21265SDavid Rientjes 18513722e13cSWanpeng Li addr = __vmalloc_area_node(area, gfp_mask, prot, node); 18521368edf0SMel Gorman if (!addr) 1853b82225f3SWanpeng Li return NULL; 185489219d37SCatalin Marinas 185589219d37SCatalin Marinas /* 185620fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED 185720fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized. 18584341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here. 1859f5252e00SMitsuo Hayasaka */ 186020fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area); 1861f5252e00SMitsuo Hayasaka 186294f4a161SCatalin Marinas kmemleak_vmalloc(area, size, gfp_mask); 186389219d37SCatalin Marinas 186489219d37SCatalin Marinas return addr; 1865de7d2b56SJoe Perches 1866de7d2b56SJoe Perches fail: 1867a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 18687877cdccSMichal Hocko "vmalloc: allocation failure: %lu bytes", real_size); 1869de7d2b56SJoe Perches return NULL; 1870930fc45aSChristoph Lameter } 1871930fc45aSChristoph Lameter 1872153178edSUladzislau Rezki (Sony) /* 1873153178edSUladzislau Rezki (Sony) * This is only for performance analysis of vmalloc and stress purpose. 1874153178edSUladzislau Rezki (Sony) * It is required by vmalloc test module, therefore do not use it other 1875153178edSUladzislau Rezki (Sony) * than that. 1876153178edSUladzislau Rezki (Sony) */ 1877153178edSUladzislau Rezki (Sony) #ifdef CONFIG_TEST_VMALLOC_MODULE 1878153178edSUladzislau Rezki (Sony) EXPORT_SYMBOL_GPL(__vmalloc_node_range); 1879153178edSUladzislau Rezki (Sony) #endif 1880153178edSUladzislau Rezki (Sony) 18811da177e4SLinus Torvalds /** 1882930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 18831da177e4SLinus Torvalds * @size: allocation size 18842dca6999SDavid Miller * @align: desired alignment 18851da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 18861da177e4SLinus Torvalds * @prot: protection mask for the allocated pages 188700ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 1888c85d194bSRandy Dunlap * @caller: caller's return address 18891da177e4SLinus Torvalds * 18901da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 18911da177e4SLinus Torvalds * allocator with @gfp_mask flags. Map them into contiguous 18921da177e4SLinus Torvalds * kernel virtual space, using a pagetable protection of @prot. 
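 *
 * A minimal usage sketch (illustrative only), through the exported
 * __vmalloc() wrapper defined below:
 *
 *	p = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);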
1893a7c3e901SMichal Hocko * 1894dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 1895a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported 1896a7c3e901SMichal Hocko * 1897a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be consulted 1898a7c3e901SMichal Hocko * with mm people. 1899a862f68aSMike Rapoport * 1900a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 19011da177e4SLinus Torvalds */ 19028594a21cSMichal Hocko static void *__vmalloc_node(unsigned long size, unsigned long align, 19032dca6999SDavid Miller gfp_t gfp_mask, pgprot_t prot, 19045e6cafc8SMarek Szyprowski int node, const void *caller) 19051da177e4SLinus Torvalds { 1906d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 1907cb9e3c29SAndrey Ryabinin gfp_mask, prot, 0, node, caller); 19081da177e4SLinus Torvalds } 19091da177e4SLinus Torvalds 1910930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 1911930fc45aSChristoph Lameter { 191200ef2d2fSDavid Rientjes return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, 191323016969SChristoph Lameter __builtin_return_address(0)); 1914930fc45aSChristoph Lameter } 19151da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 19161da177e4SLinus Torvalds 19178594a21cSMichal Hocko static inline void *__vmalloc_node_flags(unsigned long size, 19188594a21cSMichal Hocko int node, gfp_t flags) 19198594a21cSMichal Hocko { 19208594a21cSMichal Hocko return __vmalloc_node(size, 1, flags, PAGE_KERNEL, 19218594a21cSMichal Hocko node, __builtin_return_address(0)); 19228594a21cSMichal Hocko } 19238594a21cSMichal Hocko 19248594a21cSMichal Hocko 19258594a21cSMichal Hocko void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, 19268594a21cSMichal Hocko void *caller) 19278594a21cSMichal Hocko { 19288594a21cSMichal Hocko return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller); 19298594a21cSMichal Hocko } 19308594a21cSMichal Hocko 19311da177e4SLinus Torvalds /** 19321da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 19331da177e4SLinus Torvalds * @size: allocation size 193492eac168SMike Rapoport * 19351da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 19361da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 19371da177e4SLinus Torvalds * 1938c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 19391da177e4SLinus Torvalds * use __vmalloc() instead. 1940a862f68aSMike Rapoport * 1941a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 19421da177e4SLinus Torvalds */ 19431da177e4SLinus Torvalds void *vmalloc(unsigned long size) 19441da177e4SLinus Torvalds { 194500ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 194619809c2dSMichal Hocko GFP_KERNEL); 19471da177e4SLinus Torvalds } 19481da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 19491da177e4SLinus Torvalds 1950930fc45aSChristoph Lameter /** 1951e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 1952e1ca7788SDave Young * @size: allocation size 195392eac168SMike Rapoport * 1954e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 1955e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 1956e1ca7788SDave Young * The memory allocated is set to zero. 
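 *
 * A minimal usage sketch (illustrative only; 'struct foo' and 'nents' are
 * made-up names):
 *
 *	struct foo *table = vzalloc(array_size(nents, sizeof(*table)));
 *
 *	if (!table)
 *		return -ENOMEM;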
1957e1ca7788SDave Young * 1958e1ca7788SDave Young * For tight control over page level allocator and protection flags 1959e1ca7788SDave Young * use __vmalloc() instead. 1960a862f68aSMike Rapoport * 1961a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 1962e1ca7788SDave Young */ 1963e1ca7788SDave Young void *vzalloc(unsigned long size) 1964e1ca7788SDave Young { 196500ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 196619809c2dSMichal Hocko GFP_KERNEL | __GFP_ZERO); 1967e1ca7788SDave Young } 1968e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 1969e1ca7788SDave Young 1970e1ca7788SDave Young /** 1971ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 197283342314SNick Piggin * @size: allocation size 1973ead04089SRolf Eike Beer * 1974ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 1975ead04089SRolf Eike Beer * without leaking data. 1976a862f68aSMike Rapoport * 1977a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 197883342314SNick Piggin */ 197983342314SNick Piggin void *vmalloc_user(unsigned long size) 198083342314SNick Piggin { 1981bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 1982bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 1983bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 198400ef2d2fSDavid Rientjes __builtin_return_address(0)); 198583342314SNick Piggin } 198683342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 198783342314SNick Piggin 198883342314SNick Piggin /** 1989930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 1990930fc45aSChristoph Lameter * @size: allocation size 1991d44e0780SRandy Dunlap * @node: numa node 1992930fc45aSChristoph Lameter * 1993930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 1994930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 1995930fc45aSChristoph Lameter * 1996c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 1997930fc45aSChristoph Lameter * use __vmalloc() instead. 1998a862f68aSMike Rapoport * 1999a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2000930fc45aSChristoph Lameter */ 2001930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 2002930fc45aSChristoph Lameter { 200319809c2dSMichal Hocko return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL, 200423016969SChristoph Lameter node, __builtin_return_address(0)); 2005930fc45aSChristoph Lameter } 2006930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 2007930fc45aSChristoph Lameter 2008e1ca7788SDave Young /** 2009e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 2010e1ca7788SDave Young * @size: allocation size 2011e1ca7788SDave Young * @node: numa node 2012e1ca7788SDave Young * 2013e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 2014e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 2015e1ca7788SDave Young * The memory allocated is set to zero. 2016e1ca7788SDave Young * 2017e1ca7788SDave Young * For tight control over page level allocator and protection flags 2018e1ca7788SDave Young * use __vmalloc_node() instead. 
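 *
 * A minimal usage sketch (illustrative only): keeping a zeroed buffer local
 * to the node whose CPUs will touch it most:
 *
 *	buf = vzalloc_node(size, cpu_to_node(cpu));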
2019a862f68aSMike Rapoport * 2020a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2021e1ca7788SDave Young */ 2022e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 2023e1ca7788SDave Young { 2024e1ca7788SDave Young return __vmalloc_node_flags(size, node, 202519809c2dSMichal Hocko GFP_KERNEL | __GFP_ZERO); 2026e1ca7788SDave Young } 2027e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 2028e1ca7788SDave Young 20291da177e4SLinus Torvalds /** 20301da177e4SLinus Torvalds * vmalloc_exec - allocate virtually contiguous, executable memory 20311da177e4SLinus Torvalds * @size: allocation size 20321da177e4SLinus Torvalds * 20331da177e4SLinus Torvalds * Kernel-internal function to allocate enough pages to cover @size 20341da177e4SLinus Torvalds * the page level allocator and map them into contiguous and 20351da177e4SLinus Torvalds * executable kernel virtual space. 20361da177e4SLinus Torvalds * 2037c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 20381da177e4SLinus Torvalds * use __vmalloc() instead. 2039a862f68aSMike Rapoport * 2040a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 20411da177e4SLinus Torvalds */ 20421da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size) 20431da177e4SLinus Torvalds { 2044868b104dSRick Edgecombe return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 2045868b104dSRick Edgecombe GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, 204600ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 20471da177e4SLinus Torvalds } 20481da177e4SLinus Torvalds 20490d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 2050698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 20510d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 2052698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 20530d08e0d3SAndi Kleen #else 2054698d0831SMichal Hocko /* 2055698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others 2056698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone. 2057698d0831SMichal Hocko */ 2058698d0831SMichal Hocko #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL 20590d08e0d3SAndi Kleen #endif 20600d08e0d3SAndi Kleen 20611da177e4SLinus Torvalds /** 20621da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 20631da177e4SLinus Torvalds * @size: allocation size 20641da177e4SLinus Torvalds * 20651da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 20661da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 
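 *
 * A minimal usage sketch (illustrative only; 'ring_bytes' is a made-up
 * size): a driver whose device can only address 32bit physical memory:
 *
 *	ring = vmalloc_32(ring_bytes);
 *
 *	if (!ring)
 *		return -ENOMEM;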
2067a862f68aSMike Rapoport * 2068a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 20691da177e4SLinus Torvalds */ 20701da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 20711da177e4SLinus Torvalds { 20722dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, 207300ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 20741da177e4SLinus Torvalds } 20751da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 20761da177e4SLinus Torvalds 207783342314SNick Piggin /** 2078ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 207983342314SNick Piggin * @size: allocation size 2080ead04089SRolf Eike Beer * 2081ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 2082ead04089SRolf Eike Beer * mapped to userspace without leaking data. 2083a862f68aSMike Rapoport * 2084a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 208583342314SNick Piggin */ 208683342314SNick Piggin void *vmalloc_32_user(unsigned long size) 208783342314SNick Piggin { 2088bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 2089bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 2090bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 20915a82ac71SRoman Penyaev __builtin_return_address(0)); 209283342314SNick Piggin } 209383342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 209483342314SNick Piggin 2095d0107eb0SKAMEZAWA Hiroyuki /* 2096d0107eb0SKAMEZAWA Hiroyuki * small helper routine , copy contents to buf from addr. 2097d0107eb0SKAMEZAWA Hiroyuki * If the page is not present, fill zero. 2098d0107eb0SKAMEZAWA Hiroyuki */ 2099d0107eb0SKAMEZAWA Hiroyuki 2100d0107eb0SKAMEZAWA Hiroyuki static int aligned_vread(char *buf, char *addr, unsigned long count) 2101d0107eb0SKAMEZAWA Hiroyuki { 2102d0107eb0SKAMEZAWA Hiroyuki struct page *p; 2103d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 2104d0107eb0SKAMEZAWA Hiroyuki 2105d0107eb0SKAMEZAWA Hiroyuki while (count) { 2106d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 2107d0107eb0SKAMEZAWA Hiroyuki 2108891c49abSAlexander Kuleshov offset = offset_in_page(addr); 2109d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 2110d0107eb0SKAMEZAWA Hiroyuki if (length > count) 2111d0107eb0SKAMEZAWA Hiroyuki length = count; 2112d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 2113d0107eb0SKAMEZAWA Hiroyuki /* 2114d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need 2115d0107eb0SKAMEZAWA Hiroyuki * lock. But adding lock here means that we need to add 2116d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calles for this _debug_ 2117d0107eb0SKAMEZAWA Hiroyuki * interface, rarely used. Instead of that, we'll use 2118d0107eb0SKAMEZAWA Hiroyuki * kmap() and get small overhead in this access function. 
2119d0107eb0SKAMEZAWA Hiroyuki */
2120d0107eb0SKAMEZAWA Hiroyuki if (p) {
2121d0107eb0SKAMEZAWA Hiroyuki /*
2122d0107eb0SKAMEZAWA Hiroyuki * we can expect that USER0 is not used (see the vread/vwrite
2123d0107eb0SKAMEZAWA Hiroyuki * function descriptions)
2124d0107eb0SKAMEZAWA Hiroyuki */
21259b04c5feSCong Wang void *map = kmap_atomic(p);
2126d0107eb0SKAMEZAWA Hiroyuki memcpy(buf, map + offset, length);
21279b04c5feSCong Wang kunmap_atomic(map);
2128d0107eb0SKAMEZAWA Hiroyuki } else
2129d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, length);
2130d0107eb0SKAMEZAWA Hiroyuki 
2131d0107eb0SKAMEZAWA Hiroyuki addr += length;
2132d0107eb0SKAMEZAWA Hiroyuki buf += length;
2133d0107eb0SKAMEZAWA Hiroyuki copied += length;
2134d0107eb0SKAMEZAWA Hiroyuki count -= length;
2135d0107eb0SKAMEZAWA Hiroyuki }
2136d0107eb0SKAMEZAWA Hiroyuki return copied;
2137d0107eb0SKAMEZAWA Hiroyuki }
2138d0107eb0SKAMEZAWA Hiroyuki 
2139d0107eb0SKAMEZAWA Hiroyuki static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2140d0107eb0SKAMEZAWA Hiroyuki {
2141d0107eb0SKAMEZAWA Hiroyuki struct page *p;
2142d0107eb0SKAMEZAWA Hiroyuki int copied = 0;
2143d0107eb0SKAMEZAWA Hiroyuki 
2144d0107eb0SKAMEZAWA Hiroyuki while (count) {
2145d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length;
2146d0107eb0SKAMEZAWA Hiroyuki 
2147891c49abSAlexander Kuleshov offset = offset_in_page(addr);
2148d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset;
2149d0107eb0SKAMEZAWA Hiroyuki if (length > count)
2150d0107eb0SKAMEZAWA Hiroyuki length = count;
2151d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr);
2152d0107eb0SKAMEZAWA Hiroyuki /*
2153d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need a
2154d0107eb0SKAMEZAWA Hiroyuki * lock. But adding a lock here means adding the overhead
2155d0107eb0SKAMEZAWA Hiroyuki * of vmalloc()/vfree() calls for this _debug_ interface,
2156d0107eb0SKAMEZAWA Hiroyuki * which is rarely used. Instead, we'll use kmap() and
2157d0107eb0SKAMEZAWA Hiroyuki * accept a small overhead in this access function.
2158d0107eb0SKAMEZAWA Hiroyuki */
2159d0107eb0SKAMEZAWA Hiroyuki if (p) {
2160d0107eb0SKAMEZAWA Hiroyuki /*
2161d0107eb0SKAMEZAWA Hiroyuki * we can expect that USER0 is not used (see the vread/vwrite
2162d0107eb0SKAMEZAWA Hiroyuki * function descriptions)
2163d0107eb0SKAMEZAWA Hiroyuki */
21649b04c5feSCong Wang void *map = kmap_atomic(p);
2165d0107eb0SKAMEZAWA Hiroyuki memcpy(map + offset, buf, length);
21669b04c5feSCong Wang kunmap_atomic(map);
2167d0107eb0SKAMEZAWA Hiroyuki }
2168d0107eb0SKAMEZAWA Hiroyuki addr += length;
2169d0107eb0SKAMEZAWA Hiroyuki buf += length;
2170d0107eb0SKAMEZAWA Hiroyuki copied += length;
2171d0107eb0SKAMEZAWA Hiroyuki count -= length;
2172d0107eb0SKAMEZAWA Hiroyuki }
2173d0107eb0SKAMEZAWA Hiroyuki return copied;
2174d0107eb0SKAMEZAWA Hiroyuki }
2175d0107eb0SKAMEZAWA Hiroyuki 
2176d0107eb0SKAMEZAWA Hiroyuki /**
2177d0107eb0SKAMEZAWA Hiroyuki * vread() - read vmalloc area in a safe way.
2178d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for reading data
2179d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address.
2180d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read.
2181d0107eb0SKAMEZAWA Hiroyuki *
2182d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and
2183d0107eb0SKAMEZAWA Hiroyuki * copies data from that area to a given buffer. If the given memory range
2184d0107eb0SKAMEZAWA Hiroyuki * of [addr...addr+count) includes some valid address, data is copied to
2185d0107eb0SKAMEZAWA Hiroyuki * the proper area of @buf. If there are memory holes, they'll be zero-filled.
2186d0107eb0SKAMEZAWA Hiroyuki * An IOREMAP area is treated as a memory hole and no copy is done.
2187d0107eb0SKAMEZAWA Hiroyuki *
2188d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a live
2189a8e5202dSCong Wang * vm_struct area, 0 is returned. @buf should be a kernel buffer.
2190d0107eb0SKAMEZAWA Hiroyuki *
2191d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vread() is never necessary because the caller
2192d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy().
2193d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access the vmalloc area without
2194d0107eb0SKAMEZAWA Hiroyuki * any information, such as /dev/kmem.
2195a862f68aSMike Rapoport *
2196a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be increased
2197a862f68aSMike Rapoport * (same number as @count) or %0 if [addr...addr+count) doesn't
2198a862f68aSMike Rapoport * include any intersection with valid vmalloc area
2199d0107eb0SKAMEZAWA Hiroyuki */
22001da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count)
22011da177e4SLinus Torvalds {
2202e81ce85fSJoonsoo Kim struct vmap_area *va;
2203e81ce85fSJoonsoo Kim struct vm_struct *vm;
22041da177e4SLinus Torvalds char *vaddr, *buf_start = buf;
2205d0107eb0SKAMEZAWA Hiroyuki unsigned long buflen = count;
22061da177e4SLinus Torvalds unsigned long n;
22071da177e4SLinus Torvalds 
22081da177e4SLinus Torvalds /* Don't allow overflow */
22091da177e4SLinus Torvalds if ((unsigned long) addr + count < count)
22101da177e4SLinus Torvalds count = -(unsigned long) addr;
22111da177e4SLinus Torvalds 
2212e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock);
2213e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) {
2214e81ce85fSJoonsoo Kim if (!count)
2215e81ce85fSJoonsoo Kim break;
2216e81ce85fSJoonsoo Kim 
2217e81ce85fSJoonsoo Kim if (!(va->flags & VM_VM_AREA))
2218e81ce85fSJoonsoo Kim continue;
2219e81ce85fSJoonsoo Kim 
2220e81ce85fSJoonsoo Kim vm = va->vm;
2221e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr;
2222762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm))
22231da177e4SLinus Torvalds continue;
22241da177e4SLinus Torvalds while (addr < vaddr) {
22251da177e4SLinus Torvalds if (count == 0)
22261da177e4SLinus Torvalds goto finished;
22271da177e4SLinus Torvalds *buf = '\0';
22281da177e4SLinus Torvalds buf++;
22291da177e4SLinus Torvalds addr++;
22301da177e4SLinus Torvalds count--;
22311da177e4SLinus Torvalds }
2232762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr;
2233d0107eb0SKAMEZAWA Hiroyuki if (n > count)
2234d0107eb0SKAMEZAWA Hiroyuki n = count;
2235e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP))
2236d0107eb0SKAMEZAWA Hiroyuki aligned_vread(buf, addr, n);
2237d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */
2238d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, n);
2239d0107eb0SKAMEZAWA Hiroyuki buf += n;
2240d0107eb0SKAMEZAWA Hiroyuki addr += n;
2241d0107eb0SKAMEZAWA Hiroyuki count -= n;
22421da177e4SLinus Torvalds }
22431da177e4SLinus Torvalds finished:
2244e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock);
2245d0107eb0SKAMEZAWA Hiroyuki 
2246d0107eb0SKAMEZAWA Hiroyuki if (buf == buf_start)
2247d0107eb0SKAMEZAWA Hiroyuki return 0;
2248d0107eb0SKAMEZAWA Hiroyuki /* zero-fill memory holes */
2249d0107eb0SKAMEZAWA Hiroyuki if (buf != buf_start + buflen)
2250d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, buflen - (buf - buf_start));
2251d0107eb0SKAMEZAWA Hiroyuki 
2252d0107eb0SKAMEZAWA Hiroyuki return buflen;
22531da177e4SLinus Torvalds }
22541da177e4SLinus Torvalds 
2255d0107eb0SKAMEZAWA Hiroyuki /**
2256d0107eb0SKAMEZAWA Hiroyuki * vwrite() - write vmalloc area in a safe way.
2257d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for source data
2258d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address.
2259d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be written.
2260d0107eb0SKAMEZAWA Hiroyuki *
2261d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and
2262d0107eb0SKAMEZAWA Hiroyuki * copies data from a buffer to the given addr. If the specified range of
2263d0107eb0SKAMEZAWA Hiroyuki * [addr...addr+count) includes some valid address, data is copied from
2264d0107eb0SKAMEZAWA Hiroyuki * the proper area of @buf. If there are memory holes, no copy is done for them.
2265d0107eb0SKAMEZAWA Hiroyuki * An IOREMAP area is treated as a memory hole and no copy is done.
2266d0107eb0SKAMEZAWA Hiroyuki *
2267d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a live
2268a8e5202dSCong Wang * vm_struct area, 0 is returned. @buf should be a kernel buffer.
2269d0107eb0SKAMEZAWA Hiroyuki *
2270d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vwrite() is never necessary because the caller
2271d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy().
2272d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access the vmalloc area without
2273d0107eb0SKAMEZAWA Hiroyuki * any information, such as /dev/kmem.
2274a862f68aSMike Rapoport *
2275a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be
2276a862f68aSMike Rapoport * increased (same number as @count) or %0 if [addr...addr+count)
2277a862f68aSMike Rapoport * doesn't include any intersection with valid vmalloc area
2278d0107eb0SKAMEZAWA Hiroyuki */
22791da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count)
22801da177e4SLinus Torvalds {
2281e81ce85fSJoonsoo Kim struct vmap_area *va;
2282e81ce85fSJoonsoo Kim struct vm_struct *vm;
2283d0107eb0SKAMEZAWA Hiroyuki char *vaddr;
2284d0107eb0SKAMEZAWA Hiroyuki unsigned long n, buflen;
2285d0107eb0SKAMEZAWA Hiroyuki int copied = 0;
22861da177e4SLinus Torvalds 
22871da177e4SLinus Torvalds /* Don't allow overflow */
22881da177e4SLinus Torvalds if ((unsigned long) addr + count < count)
22891da177e4SLinus Torvalds count = -(unsigned long) addr;
2290d0107eb0SKAMEZAWA Hiroyuki buflen = count;
22911da177e4SLinus Torvalds 
2292e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock);
2293e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) {
2294e81ce85fSJoonsoo Kim if (!count)
2295e81ce85fSJoonsoo Kim break;
2296e81ce85fSJoonsoo Kim 
2297e81ce85fSJoonsoo Kim if (!(va->flags & VM_VM_AREA))
2298e81ce85fSJoonsoo Kim continue;
2299e81ce85fSJoonsoo Kim 
2300e81ce85fSJoonsoo Kim vm = va->vm;
2301e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr;
2302762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm))
23031da177e4SLinus Torvalds continue;
23041da177e4SLinus Torvalds while (addr < vaddr) {
23051da177e4SLinus Torvalds if (count == 0)
23061da177e4SLinus Torvalds goto finished;
23071da177e4SLinus Torvalds buf++;
23081da177e4SLinus Torvalds addr++;
23091da177e4SLinus Torvalds count--;
23101da177e4SLinus Torvalds }
2311762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr;
2312d0107eb0SKAMEZAWA Hiroyuki if (n > count)
2313d0107eb0SKAMEZAWA Hiroyuki n = count;
2314e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) {
2315d0107eb0SKAMEZAWA Hiroyuki aligned_vwrite(buf, addr, n);
2316d0107eb0SKAMEZAWA Hiroyuki copied++; 2317d0107eb0SKAMEZAWA Hiroyuki } 2318d0107eb0SKAMEZAWA Hiroyuki buf += n; 2319d0107eb0SKAMEZAWA Hiroyuki addr += n; 2320d0107eb0SKAMEZAWA Hiroyuki count -= n; 23211da177e4SLinus Torvalds } 23221da177e4SLinus Torvalds finished: 2323e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 2324d0107eb0SKAMEZAWA Hiroyuki if (!copied) 2325d0107eb0SKAMEZAWA Hiroyuki return 0; 2326d0107eb0SKAMEZAWA Hiroyuki return buflen; 23271da177e4SLinus Torvalds } 232883342314SNick Piggin 232983342314SNick Piggin /** 2330e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace 2331e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover 2332e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at 2333e69e9d4aSHATAYAMA Daisuke * @kaddr: virtual address of vmalloc kernel memory 2334e69e9d4aSHATAYAMA Daisuke * @size: size of map area 2335e69e9d4aSHATAYAMA Daisuke * 2336e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure 2337e69e9d4aSHATAYAMA Daisuke * 2338e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area, 2339e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at 2340e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if that criteria isn't 2341e69e9d4aSHATAYAMA Daisuke * met. 2342e69e9d4aSHATAYAMA Daisuke * 2343e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c) 2344e69e9d4aSHATAYAMA Daisuke */ 2345e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 2346e69e9d4aSHATAYAMA Daisuke void *kaddr, unsigned long size) 2347e69e9d4aSHATAYAMA Daisuke { 2348e69e9d4aSHATAYAMA Daisuke struct vm_struct *area; 2349e69e9d4aSHATAYAMA Daisuke 2350e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size); 2351e69e9d4aSHATAYAMA Daisuke 2352e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 2353e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2354e69e9d4aSHATAYAMA Daisuke 2355e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr); 2356e69e9d4aSHATAYAMA Daisuke if (!area) 2357e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2358e69e9d4aSHATAYAMA Daisuke 2359e69e9d4aSHATAYAMA Daisuke if (!(area->flags & VM_USERMAP)) 2360e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2361e69e9d4aSHATAYAMA Daisuke 2362401592d2SRoman Penyaev if (kaddr + size > area->addr + get_vm_area_size(area)) 2363e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2364e69e9d4aSHATAYAMA Daisuke 2365e69e9d4aSHATAYAMA Daisuke do { 2366e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr); 2367e69e9d4aSHATAYAMA Daisuke int ret; 2368e69e9d4aSHATAYAMA Daisuke 2369e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page); 2370e69e9d4aSHATAYAMA Daisuke if (ret) 2371e69e9d4aSHATAYAMA Daisuke return ret; 2372e69e9d4aSHATAYAMA Daisuke 2373e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE; 2374e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE; 2375e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE; 2376e69e9d4aSHATAYAMA Daisuke } while (size > 0); 2377e69e9d4aSHATAYAMA Daisuke 2378e69e9d4aSHATAYAMA Daisuke vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 2379e69e9d4aSHATAYAMA Daisuke 2380e69e9d4aSHATAYAMA Daisuke return 0; 2381e69e9d4aSHATAYAMA Daisuke } 2382e69e9d4aSHATAYAMA Daisuke EXPORT_SYMBOL(remap_vmalloc_range_partial); 2383e69e9d4aSHATAYAMA Daisuke 2384e69e9d4aSHATAYAMA Daisuke /** 238583342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 238683342314SNick Piggin * @vma: vma to cover (map full range of vma) 
238783342314SNick Piggin * @addr: vmalloc memory 238883342314SNick Piggin * @pgoff: number of pages into addr before first page to map 23897682486bSRandy Dunlap * 23907682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 239183342314SNick Piggin * 239283342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 239383342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 239483342314SNick Piggin * that criteria isn't met. 239583342314SNick Piggin * 239672fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 239783342314SNick Piggin */ 239883342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 239983342314SNick Piggin unsigned long pgoff) 240083342314SNick Piggin { 2401e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start, 2402e69e9d4aSHATAYAMA Daisuke addr + (pgoff << PAGE_SHIFT), 2403e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start); 240483342314SNick Piggin } 240583342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 240683342314SNick Piggin 24071eeb66a1SChristoph Hellwig /* 24081eeb66a1SChristoph Hellwig * Implement a stub for vmalloc_sync_all() if the architecture chose not to 24091eeb66a1SChristoph Hellwig * have one. 24101eeb66a1SChristoph Hellwig */ 24113b32123dSGideon Israel Dsouza void __weak vmalloc_sync_all(void) 24121eeb66a1SChristoph Hellwig { 24131eeb66a1SChristoph Hellwig } 24145f4352fbSJeremy Fitzhardinge 24155f4352fbSJeremy Fitzhardinge 24162f569afdSMartin Schwidefsky static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 24175f4352fbSJeremy Fitzhardinge { 2418cd12909cSDavid Vrabel pte_t ***p = data; 2419cd12909cSDavid Vrabel 2420cd12909cSDavid Vrabel if (p) { 2421cd12909cSDavid Vrabel *(*p) = pte; 2422cd12909cSDavid Vrabel (*p)++; 2423cd12909cSDavid Vrabel } 24245f4352fbSJeremy Fitzhardinge return 0; 24255f4352fbSJeremy Fitzhardinge } 24265f4352fbSJeremy Fitzhardinge 24275f4352fbSJeremy Fitzhardinge /** 24285f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 24295f4352fbSJeremy Fitzhardinge * @size: size of the area 2430cd12909cSDavid Vrabel * @ptes: returns the PTEs for the address space 24317682486bSRandy Dunlap * 24327682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 24335f4352fbSJeremy Fitzhardinge * 24345f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 24355f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 2436cd12909cSDavid Vrabel * are created. 2437cd12909cSDavid Vrabel * 2438cd12909cSDavid Vrabel * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 2439cd12909cSDavid Vrabel * allocated for the VM area are returned. 24405f4352fbSJeremy Fitzhardinge */ 2441cd12909cSDavid Vrabel struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) 24425f4352fbSJeremy Fitzhardinge { 24435f4352fbSJeremy Fitzhardinge struct vm_struct *area; 24445f4352fbSJeremy Fitzhardinge 244523016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 244623016969SChristoph Lameter __builtin_return_address(0)); 24475f4352fbSJeremy Fitzhardinge if (area == NULL) 24485f4352fbSJeremy Fitzhardinge return NULL; 24495f4352fbSJeremy Fitzhardinge 24505f4352fbSJeremy Fitzhardinge /* 24515f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 24525f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm. 
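 *
 * apply_to_page_range() below walks the whole range, allocating any
 * missing page-table levels, and calls f() on every PTE slot; when the
 * caller supplied @ptes, f() records each pte_t * there so mappings can
 * later be installed without walking the page tables again.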
24535f4352fbSJeremy Fitzhardinge */ 24545f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 2455cd12909cSDavid Vrabel size, f, ptes ? &ptes : NULL)) { 24565f4352fbSJeremy Fitzhardinge free_vm_area(area); 24575f4352fbSJeremy Fitzhardinge return NULL; 24585f4352fbSJeremy Fitzhardinge } 24595f4352fbSJeremy Fitzhardinge 24605f4352fbSJeremy Fitzhardinge return area; 24615f4352fbSJeremy Fitzhardinge } 24625f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 24635f4352fbSJeremy Fitzhardinge 24645f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 24655f4352fbSJeremy Fitzhardinge { 24665f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 24675f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 24685f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 24695f4352fbSJeremy Fitzhardinge kfree(area); 24705f4352fbSJeremy Fitzhardinge } 24715f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 2472a10aa579SChristoph Lameter 24734f8b02b4STejun Heo #ifdef CONFIG_SMP 2474ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 2475ca23e405STejun Heo { 24764583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node); 2477ca23e405STejun Heo } 2478ca23e405STejun Heo 2479ca23e405STejun Heo /** 2480ca23e405STejun Heo * pvm_find_next_prev - find the next and prev vmap_area surrounding @end 2481ca23e405STejun Heo * @end: target address 2482ca23e405STejun Heo * @pnext: out arg for the next vmap_area 2483ca23e405STejun Heo * @pprev: out arg for the previous vmap_area 2484ca23e405STejun Heo * 2485ca23e405STejun Heo * Returns: %true if either or both of next and prev are found, 2486ca23e405STejun Heo * %false if no vmap_area exists 2487ca23e405STejun Heo * 2488ca23e405STejun Heo * Find vmap_areas end addresses of which enclose @end. ie. if not 2489ca23e405STejun Heo * NULL, *pnext->va_end > @end and *pprev->va_end <= @end. 
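 *
 * Worked example (hypothetical addresses): with vmap_areas ending at
 * 0x1000 and 0x3000, pvm_find_next_prev(0x2000, &next, &prev) sets
 * *pprev to the area ending at 0x1000 (va_end <= 0x2000) and *pnext to
 * the area ending at 0x3000 (va_end > 0x2000).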
2490ca23e405STejun Heo */ 2491ca23e405STejun Heo static bool pvm_find_next_prev(unsigned long end, 2492ca23e405STejun Heo struct vmap_area **pnext, 2493ca23e405STejun Heo struct vmap_area **pprev) 2494ca23e405STejun Heo { 2495ca23e405STejun Heo struct rb_node *n = vmap_area_root.rb_node; 2496ca23e405STejun Heo struct vmap_area *va = NULL; 2497ca23e405STejun Heo 2498ca23e405STejun Heo while (n) { 2499ca23e405STejun Heo va = rb_entry(n, struct vmap_area, rb_node); 2500ca23e405STejun Heo if (end < va->va_end) 2501ca23e405STejun Heo n = n->rb_left; 2502ca23e405STejun Heo else if (end > va->va_end) 2503ca23e405STejun Heo n = n->rb_right; 2504ca23e405STejun Heo else 2505ca23e405STejun Heo break; 2506ca23e405STejun Heo } 2507ca23e405STejun Heo 2508ca23e405STejun Heo if (!va) 2509ca23e405STejun Heo return false; 2510ca23e405STejun Heo 2511ca23e405STejun Heo if (va->va_end > end) { 2512ca23e405STejun Heo *pnext = va; 2513ca23e405STejun Heo *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2514ca23e405STejun Heo } else { 2515ca23e405STejun Heo *pprev = va; 2516ca23e405STejun Heo *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); 2517ca23e405STejun Heo } 2518ca23e405STejun Heo return true; 2519ca23e405STejun Heo } 2520ca23e405STejun Heo 2521ca23e405STejun Heo /** 2522ca23e405STejun Heo * pvm_determine_end - find the highest aligned address between two vmap_areas 2523ca23e405STejun Heo * @pnext: in/out arg for the next vmap_area 2524ca23e405STejun Heo * @pprev: in/out arg for the previous vmap_area 2525ca23e405STejun Heo * @align: alignment 2526ca23e405STejun Heo * 2527ca23e405STejun Heo * Returns: determined end address 2528ca23e405STejun Heo * 2529ca23e405STejun Heo * Find the highest aligned address between *@pnext and *@pprev below 2530ca23e405STejun Heo * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned 2531ca23e405STejun Heo * down address is between the end addresses of the two vmap_areas. 2532ca23e405STejun Heo * 2533ca23e405STejun Heo * Please note that the address returned by this function may fall 2534ca23e405STejun Heo * inside *@pnext vmap_area. The caller is responsible for checking 2535ca23e405STejun Heo * that. 
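 *
 * Worked example (hypothetical values): with align = 0x10000 and
 * (*pnext)->va_start = 0x12345678, the candidate end address is
 * 0x12340000; *pprev is then walked downwards until its va_end no
 * longer lies above that address.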
2536ca23e405STejun Heo */ 2537ca23e405STejun Heo static unsigned long pvm_determine_end(struct vmap_area **pnext, 2538ca23e405STejun Heo struct vmap_area **pprev, 2539ca23e405STejun Heo unsigned long align) 2540ca23e405STejun Heo { 2541ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 2542ca23e405STejun Heo unsigned long addr; 2543ca23e405STejun Heo 2544ca23e405STejun Heo if (*pnext) 2545ca23e405STejun Heo addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); 2546ca23e405STejun Heo else 2547ca23e405STejun Heo addr = vmalloc_end; 2548ca23e405STejun Heo 2549ca23e405STejun Heo while (*pprev && (*pprev)->va_end > addr) { 2550ca23e405STejun Heo *pnext = *pprev; 2551ca23e405STejun Heo *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2552ca23e405STejun Heo } 2553ca23e405STejun Heo 2554ca23e405STejun Heo return addr; 2555ca23e405STejun Heo } 2556ca23e405STejun Heo 2557ca23e405STejun Heo /** 2558ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 2559ca23e405STejun Heo * @offsets: array containing offset of each area 2560ca23e405STejun Heo * @sizes: array containing size of each area 2561ca23e405STejun Heo * @nr_vms: the number of areas to allocate 2562ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this 2563ca23e405STejun Heo * 2564ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated 2565ca23e405STejun Heo * vm_structs on success, %NULL on failure 2566ca23e405STejun Heo * 2567ca23e405STejun Heo * Percpu allocator wants to use congruent vm areas so that it can 2568ca23e405STejun Heo * maintain the offsets among percpu areas. This function allocates 2569ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 2570ec3f64fcSDavid Rientjes * be scattered pretty far, distance between two areas easily going up 2571ec3f64fcSDavid Rientjes * to gigabytes. To avoid interacting with regular vmallocs, these 2572ec3f64fcSDavid Rientjes * areas are allocated from top. 2573ca23e405STejun Heo * 2574ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It 2575ca23e405STejun Heo * does everything top-down and scans areas from the end looking for 2576ca23e405STejun Heo * matching slot. While scanning, if any of the areas overlaps with 2577ca23e405STejun Heo * existing vmap_area, the base address is pulled down to fit the 2578ca23e405STejun Heo * area. Scanning is repeated till all the areas fit and then all 2579c568da28SWei Yang * necessary data structures are inserted and the result is returned. 
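 *
 * Illustrative call (hypothetical values): offsets[] = { 0, 2UL << 20 },
 * sizes[] = { 1UL << 20, 1UL << 20 }, nr_vms = 2 and align = 1UL << 20
 * request two 1MB areas placed 2MB apart; on success the two returned
 * vm_structs start exactly 2MB apart, carved out just below VMALLOC_END.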
2580ca23e405STejun Heo */ 2581ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 2582ca23e405STejun Heo const size_t *sizes, int nr_vms, 2583ec3f64fcSDavid Rientjes size_t align) 2584ca23e405STejun Heo { 2585ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 2586ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 2587ca23e405STejun Heo struct vmap_area **vas, *prev, *next; 2588ca23e405STejun Heo struct vm_struct **vms; 2589ca23e405STejun Heo int area, area2, last_area, term_area; 2590ca23e405STejun Heo unsigned long base, start, end, last_end; 2591ca23e405STejun Heo bool purged = false; 2592ca23e405STejun Heo 2593ca23e405STejun Heo /* verify parameters and allocate data structures */ 2594891c49abSAlexander Kuleshov BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 2595ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 2596ca23e405STejun Heo start = offsets[area]; 2597ca23e405STejun Heo end = start + sizes[area]; 2598ca23e405STejun Heo 2599ca23e405STejun Heo /* is everything aligned properly? */ 2600ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 2601ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 2602ca23e405STejun Heo 2603ca23e405STejun Heo /* detect the area with the highest address */ 2604ca23e405STejun Heo if (start > offsets[last_area]) 2605ca23e405STejun Heo last_area = area; 2606ca23e405STejun Heo 2607c568da28SWei Yang for (area2 = area + 1; area2 < nr_vms; area2++) { 2608ca23e405STejun Heo unsigned long start2 = offsets[area2]; 2609ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 2610ca23e405STejun Heo 2611c568da28SWei Yang BUG_ON(start2 < end && start < end2); 2612ca23e405STejun Heo } 2613ca23e405STejun Heo } 2614ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 2615ca23e405STejun Heo 2616ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 2617ca23e405STejun Heo WARN_ON(true); 2618ca23e405STejun Heo return NULL; 2619ca23e405STejun Heo } 2620ca23e405STejun Heo 26214d67d860SThomas Meyer vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 26224d67d860SThomas Meyer vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 2623ca23e405STejun Heo if (!vas || !vms) 2624f1db7afdSKautuk Consul goto err_free2; 2625ca23e405STejun Heo 2626ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 2627ec3f64fcSDavid Rientjes vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); 2628ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 2629ca23e405STejun Heo if (!vas[area] || !vms[area]) 2630ca23e405STejun Heo goto err_free; 2631ca23e405STejun Heo } 2632ca23e405STejun Heo retry: 2633ca23e405STejun Heo spin_lock(&vmap_area_lock); 2634ca23e405STejun Heo 2635ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 2636ca23e405STejun Heo area = term_area = last_area; 2637ca23e405STejun Heo start = offsets[area]; 2638ca23e405STejun Heo end = start + sizes[area]; 2639ca23e405STejun Heo 2640ca23e405STejun Heo if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { 2641ca23e405STejun Heo base = vmalloc_end - last_end; 2642ca23e405STejun Heo goto found; 2643ca23e405STejun Heo } 2644ca23e405STejun Heo base = pvm_determine_end(&next, &prev, align) - end; 2645ca23e405STejun Heo 2646ca23e405STejun Heo while (true) { 2647ca23e405STejun Heo BUG_ON(next && next->va_end <= base + end); 2648ca23e405STejun Heo BUG_ON(prev && prev->va_end > base + end); 
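/*
 * The two checks above are the loop invariant: next (if any) must end
 * above base + end, and prev (if any) must end at or below it, exactly
 * as pvm_find_next_prev() and pvm_determine_end() leave them.
 */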
2649ca23e405STejun Heo 2650ca23e405STejun Heo /* 2651ca23e405STejun Heo * base might have underflowed, add last_end before 2652ca23e405STejun Heo * comparing. 2653ca23e405STejun Heo */ 2654ca23e405STejun Heo if (base + last_end < vmalloc_start + last_end) { 2655ca23e405STejun Heo spin_unlock(&vmap_area_lock); 2656ca23e405STejun Heo if (!purged) { 2657ca23e405STejun Heo purge_vmap_area_lazy(); 2658ca23e405STejun Heo purged = true; 2659ca23e405STejun Heo goto retry; 2660ca23e405STejun Heo } 2661ca23e405STejun Heo goto err_free; 2662ca23e405STejun Heo } 2663ca23e405STejun Heo 2664ca23e405STejun Heo /* 2665ca23e405STejun Heo * If next overlaps, move base downwards so that it's 2666ca23e405STejun Heo * right below next and then recheck. 2667ca23e405STejun Heo */ 2668ca23e405STejun Heo if (next && next->va_start < base + end) { 2669ca23e405STejun Heo base = pvm_determine_end(&next, &prev, align) - end; 2670ca23e405STejun Heo term_area = area; 2671ca23e405STejun Heo continue; 2672ca23e405STejun Heo } 2673ca23e405STejun Heo 2674ca23e405STejun Heo /* 2675ca23e405STejun Heo * If prev overlaps, shift down next and prev and move 2676ca23e405STejun Heo * base so that it's right below new next and then 2677ca23e405STejun Heo * recheck. 2678ca23e405STejun Heo */ 2679ca23e405STejun Heo if (prev && prev->va_end > base + start) { 2680ca23e405STejun Heo next = prev; 2681ca23e405STejun Heo prev = node_to_va(rb_prev(&next->rb_node)); 2682ca23e405STejun Heo base = pvm_determine_end(&next, &prev, align) - end; 2683ca23e405STejun Heo term_area = area; 2684ca23e405STejun Heo continue; 2685ca23e405STejun Heo } 2686ca23e405STejun Heo 2687ca23e405STejun Heo /* 2688ca23e405STejun Heo * This area fits, move on to the previous one. If 2689ca23e405STejun Heo * the previous one is the terminal one, we're done. 
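 * (term_area marks the area that most recently forced the base address
 * to move, initially the area with the highest offset, so cycling all
 * the way back to it without another conflict means every area now
 * fits.)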
2690ca23e405STejun Heo */ 2691ca23e405STejun Heo area = (area + nr_vms - 1) % nr_vms; 2692ca23e405STejun Heo if (area == term_area) 2693ca23e405STejun Heo break; 2694ca23e405STejun Heo start = offsets[area]; 2695ca23e405STejun Heo end = start + sizes[area]; 2696ca23e405STejun Heo pvm_find_next_prev(base + end, &next, &prev); 2697ca23e405STejun Heo } 2698ca23e405STejun Heo found: 2699ca23e405STejun Heo /* we've found a fitting base, insert all va's */ 2700ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 2701ca23e405STejun Heo struct vmap_area *va = vas[area]; 2702ca23e405STejun Heo 2703ca23e405STejun Heo va->va_start = base + offsets[area]; 2704ca23e405STejun Heo va->va_end = va->va_start + sizes[area]; 2705ca23e405STejun Heo __insert_vmap_area(va); 2706ca23e405STejun Heo } 2707ca23e405STejun Heo 2708ca23e405STejun Heo vmap_area_pcpu_hole = base + offsets[last_area]; 2709ca23e405STejun Heo 2710ca23e405STejun Heo spin_unlock(&vmap_area_lock); 2711ca23e405STejun Heo 2712ca23e405STejun Heo /* insert all vm's */ 2713ca23e405STejun Heo for (area = 0; area < nr_vms; area++) 27143645cb4aSZhang Yanfei setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, 2715ca23e405STejun Heo pcpu_get_vm_areas); 2716ca23e405STejun Heo 2717ca23e405STejun Heo kfree(vas); 2718ca23e405STejun Heo return vms; 2719ca23e405STejun Heo 2720ca23e405STejun Heo err_free: 2721ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 2722ca23e405STejun Heo kfree(vas[area]); 2723ca23e405STejun Heo kfree(vms[area]); 2724ca23e405STejun Heo } 2725f1db7afdSKautuk Consul err_free2: 2726ca23e405STejun Heo kfree(vas); 2727ca23e405STejun Heo kfree(vms); 2728ca23e405STejun Heo return NULL; 2729ca23e405STejun Heo } 2730ca23e405STejun Heo 2731ca23e405STejun Heo /** 2732ca23e405STejun Heo * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 2733ca23e405STejun Heo * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 2734ca23e405STejun Heo * @nr_vms: the number of allocated areas 2735ca23e405STejun Heo * 2736ca23e405STejun Heo * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 
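 *
 * Typical pairing (illustrative sketch only):
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, nr_vms, align);
 *	if (vms) {
 *		... set up the percpu chunks ...
 *		pcpu_free_vm_areas(vms, nr_vms);
 *	}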
2737ca23e405STejun Heo */
2738ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2739ca23e405STejun Heo {
2740ca23e405STejun Heo int i;
2741ca23e405STejun Heo 
2742ca23e405STejun Heo for (i = 0; i < nr_vms; i++)
2743ca23e405STejun Heo free_vm_area(vms[i]);
2744ca23e405STejun Heo kfree(vms);
2745ca23e405STejun Heo }
27464f8b02b4STejun Heo #endif /* CONFIG_SMP */
2747a10aa579SChristoph Lameter 
2748a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS
2749a10aa579SChristoph Lameter static void *s_start(struct seq_file *m, loff_t *pos)
2750d4033afdSJoonsoo Kim __acquires(&vmap_area_lock)
2751a10aa579SChristoph Lameter {
2752d4033afdSJoonsoo Kim spin_lock(&vmap_area_lock);
27533f500069Szijun_hu return seq_list_start(&vmap_area_list, *pos);
2754a10aa579SChristoph Lameter }
2755a10aa579SChristoph Lameter 
2756a10aa579SChristoph Lameter static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2757a10aa579SChristoph Lameter {
27583f500069Szijun_hu return seq_list_next(p, &vmap_area_list, pos);
2759a10aa579SChristoph Lameter }
2760a10aa579SChristoph Lameter 
2761a10aa579SChristoph Lameter static void s_stop(struct seq_file *m, void *p)
2762d4033afdSJoonsoo Kim __releases(&vmap_area_lock)
2763a10aa579SChristoph Lameter {
2764d4033afdSJoonsoo Kim spin_unlock(&vmap_area_lock);
2765a10aa579SChristoph Lameter }
2766a10aa579SChristoph Lameter 
2767a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2768a47a126aSEric Dumazet {
2769e5adfffcSKirill A. Shutemov if (IS_ENABLED(CONFIG_NUMA)) {
2770a47a126aSEric Dumazet unsigned int nr, *counters = m->private;
2771a47a126aSEric Dumazet 
2772a47a126aSEric Dumazet if (!counters)
2773a47a126aSEric Dumazet return;
2774a47a126aSEric Dumazet 
2775af12346cSWanpeng Li if (v->flags & VM_UNINITIALIZED)
2776af12346cSWanpeng Li return;
27777e5b528bSDmitry Vyukov /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
27787e5b528bSDmitry Vyukov smp_rmb();
2779af12346cSWanpeng Li 
2780a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2781a47a126aSEric Dumazet 
2782a47a126aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr++)
2783a47a126aSEric Dumazet counters[page_to_nid(v->pages[nr])]++;
2784a47a126aSEric Dumazet 
2785a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY)
2786a47a126aSEric Dumazet if (counters[nr])
2787a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]);
2788a47a126aSEric Dumazet }
2789a47a126aSEric Dumazet }
2790a47a126aSEric Dumazet 
2791a10aa579SChristoph Lameter static int s_show(struct seq_file *m, void *p)
2792a10aa579SChristoph Lameter {
27933f500069Szijun_hu struct vmap_area *va;
2794d4033afdSJoonsoo Kim struct vm_struct *v;
2795d4033afdSJoonsoo Kim 
27963f500069Szijun_hu va = list_entry(p, struct vmap_area, list);
27973f500069Szijun_hu 
2798c2ce8c14SWanpeng Li /*
2799c2ce8c14SWanpeng Li * s_show can race with remove_vm_area(): !VM_VM_AREA means the
2800c2ce8c14SWanpeng Li * vmap area is being torn down or is a vm_map_ram allocation.
2801c2ce8c14SWanpeng Li */
280278c72746SYisheng Xie if (!(va->flags & VM_VM_AREA)) {
280378c72746SYisheng Xie seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
280478c72746SYisheng Xie (void *)va->va_start, (void *)va->va_end,
280578c72746SYisheng Xie va->va_end - va->va_start,
280678c72746SYisheng Xie va->flags & VM_LAZY_FREE ?
"unpurged vm_area" : "vm_map_ram"); 280778c72746SYisheng Xie 2808d4033afdSJoonsoo Kim return 0; 280978c72746SYisheng Xie } 2810d4033afdSJoonsoo Kim 2811d4033afdSJoonsoo Kim v = va->vm; 2812a10aa579SChristoph Lameter 281345ec1690SKees Cook seq_printf(m, "0x%pK-0x%pK %7ld", 2814a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 2815a10aa579SChristoph Lameter 281662c70bceSJoe Perches if (v->caller) 281762c70bceSJoe Perches seq_printf(m, " %pS", v->caller); 281823016969SChristoph Lameter 2819a10aa579SChristoph Lameter if (v->nr_pages) 2820a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 2821a10aa579SChristoph Lameter 2822a10aa579SChristoph Lameter if (v->phys_addr) 2823199eaa05SMiles Chen seq_printf(m, " phys=%pa", &v->phys_addr); 2824a10aa579SChristoph Lameter 2825a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 2826f4527c90SFabian Frederick seq_puts(m, " ioremap"); 2827a10aa579SChristoph Lameter 2828a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 2829f4527c90SFabian Frederick seq_puts(m, " vmalloc"); 2830a10aa579SChristoph Lameter 2831a10aa579SChristoph Lameter if (v->flags & VM_MAP) 2832f4527c90SFabian Frederick seq_puts(m, " vmap"); 2833a10aa579SChristoph Lameter 2834a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 2835f4527c90SFabian Frederick seq_puts(m, " user"); 2836a10aa579SChristoph Lameter 2837244d63eeSDavid Rientjes if (is_vmalloc_addr(v->pages)) 2838f4527c90SFabian Frederick seq_puts(m, " vpages"); 2839a10aa579SChristoph Lameter 2840a47a126aSEric Dumazet show_numa_info(m, v); 2841a10aa579SChristoph Lameter seq_putc(m, '\n'); 2842a10aa579SChristoph Lameter return 0; 2843a10aa579SChristoph Lameter } 2844a10aa579SChristoph Lameter 28455f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = { 2846a10aa579SChristoph Lameter .start = s_start, 2847a10aa579SChristoph Lameter .next = s_next, 2848a10aa579SChristoph Lameter .stop = s_stop, 2849a10aa579SChristoph Lameter .show = s_show, 2850a10aa579SChristoph Lameter }; 28515f6a6a9cSAlexey Dobriyan 28525f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 28535f6a6a9cSAlexey Dobriyan { 2854fddda2b7SChristoph Hellwig if (IS_ENABLED(CONFIG_NUMA)) 28550825a6f9SJoe Perches proc_create_seq_private("vmallocinfo", 0400, NULL, 285644414d82SChristoph Hellwig &vmalloc_op, 285744414d82SChristoph Hellwig nr_node_ids * sizeof(unsigned int), NULL); 2858fddda2b7SChristoph Hellwig else 28590825a6f9SJoe Perches proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op); 28605f6a6a9cSAlexey Dobriyan return 0; 28615f6a6a9cSAlexey Dobriyan } 28625f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 2863db3808c1SJoonsoo Kim 2864a10aa579SChristoph Lameter #endif 2865