/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/vmalloc.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
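
/*
 * Unmap the kernel page-table entries covering [addr, addr + size).
 * Walks the pgd/pud/pmd/pte hierarchy with the vunmap_*_range() helpers
 * above, then flushes the TLB over the whole range.
 */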
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
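
/*
 * Example (illustrative only, not part of this file): looking up the
 * struct page backing one byte of a vmalloc'ed buffer.  The names are
 * hypothetical.
 *
 *	void *buf = vmalloc(4 * PAGE_SIZE);
 *	struct page *page;
 *
 *	if (buf)
 *		page = vmalloc_to_page(buf + 2 * PAGE_SIZE);
 *
 * Here page refers to the third page backing buf, since the mapping
 * is built page by page.
 */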

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

static struct vm_struct *
__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}
	if ((size + addr) < addr)
		goto out;
	if (addr > end - size)
		goto out;

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
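
/*
 * Example (illustrative only): reserving a chunk of kernel virtual
 * address space without backing it with pages, as an ioremap()-style
 * caller might.  The size is hypothetical.
 *
 *	struct vm_struct *vm;
 *
 *	vm = __get_vm_area(1024 * 1024, VM_IOREMAP,
 *			   VMALLOC_START, VMALLOC_END);
 *	if (!vm)
 *		return -ENOMEM;
 *	... map the device range at vm->addr ...
 */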

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vm_struct *v;

	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
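
/*
 * Example (illustrative only): since vfree() must not run in interrupt
 * context, code that may finish in IRQ context typically defers the
 * free to process context, e.g. through a work item.  The names are
 * hypothetical.
 *
 *	static void my_free_work(struct work_struct *work)
 *	{
 *		vfree(my_buf);
 *	}
 *
 *	INIT_WORK(&my_work, my_free_work);
 *	schedule_work(&my_work);	(safe to call from IRQ context)
 */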

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
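
/*
 * Example (illustrative only): stitching individually allocated pages
 * into one virtually contiguous mapping.  Error handling is
 * abbreviated and the names are hypothetical.
 *
 *	struct page *pages[4];
 *	void *va;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	va = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
 *	...
 *	vunmap(va);	(the pages themselves stay allocated)
 */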

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/*
	 * Please note that the recursion is strictly bounded: the nested
	 * __vmalloc_node() call only allocates the page-pointer array,
	 * which is far smaller than the area it describes.
	 */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over the page level allocator and protection
 * flags, use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
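
/*
 * Example (illustrative only): a large allocation that must be
 * virtually, but need not be physically, contiguous.
 *
 *	char *buf = vmalloc(128 * 1024);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	memset(buf, 0, 128 * 1024);
 *	...
 *	vfree(buf);
 */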

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over the page level allocator and protection
 * flags, use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
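
/*
 * Example (illustrative only): keeping a per-node scratch buffer on
 * the node that will use it.  The array and size are hypothetical.
 *
 *	for_each_online_node(nid)
 *		scratch[nid] = vmalloc_node(SCRATCH_SIZE, nid);
 */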

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over the page level allocator and protection
 * flags, use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
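
/*
 * Example (illustrative only): a buffer whose backing pages must all
 * be 32bit physically addressable, e.g. for a device that cannot
 * address more (BUF_SIZE is hypothetical).  The zone is picked by the
 * GFP_VMALLOC32 definition above.
 *
 *	void *buf = vmalloc_32(BUF_SIZE);
 */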

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
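		/* n bytes remain in this area, excluding the guard page. */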
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
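
/*
 * Example (illustrative only): reserving address space whose page
 * tables are guaranteed to exist, as a paravirtualized backend might
 * before installing foreign frames into it.
 *
 *	struct vm_struct *vm = alloc_vm_area(PAGE_SIZE);
 *
 *	if (!vm)
 *		return -ENOMEM;
 *	... install mappings at vm->addr ...
 *	free_vm_area(vm);
 */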

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);


#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}
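
/*
 * Show one vmlist entry; this backs /proc/vmallocinfo.  A line has
 * the form
 *
 *	0xSTART-0xEND  SIZE  CALLER  pages=N phys=P <flags>  N0=x N1=y
 *
 * where everything after the size is optional.
 */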
static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[2 * KSYM_NAME_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
#endif