/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
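
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a caller that owns a vmalloc()'ed buffer and needs the backing struct
 * page or page frame number for each PAGE_SIZE chunk might walk it with
 * vmalloc_to_page()/vmalloc_to_pfn().  The buffer and length names below
 * are made up for the example.
 *
 *	void *buf = vmalloc(len);
 *	unsigned long off;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	for (off = 0; off < len; off += PAGE_SIZE) {
 *		struct page *page = vmalloc_to_page(buf + off);
 *		unsigned long pfn = vmalloc_to_pfn(buf + off);
 *		...
 *	}
 *	vfree(buf);
 */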
static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc().  If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
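
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a typical vmap() caller already owns an array of pages, allocated
 * elsewhere, and only wants them visible at one virtually contiguous
 * kernel address; VM_MAP marks the area as a plain vmap() mapping.
 * The names below are made up for the example.
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	... use the buffer at va ...
 *	vunmap(va);		(the pages themselves are not freed)
 */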
void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
					PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
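
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * vmalloc() is the usual choice for large allocations that only need to
 * be virtually contiguous.  It may sleep, so it must not be called from
 * atomic context, and the result is released with vfree().  The names
 * below are made up for the example.
 *
 *	char *buf = vmalloc(nr_bytes);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... use buf ...
 *	vfree(buf);
 */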
/**
 *	vmalloc_user  -  allocate zeroed virtually contiguous memory for userspace
 *	@size:		allocation size
 *
 *	The resulting memory area is zeroed so it can be mapped to userspace
 *	without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 *	vmalloc_32_user  -  allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 *	The resulting memory area is 32bit addressable and zeroed so it can be
 *	mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
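
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * vread() copies from vmalloc space into an ordinary kernel buffer,
 * writing '\0' for addresses that fall in the gaps between vm areas,
 * which is how users such as /dev/kmem dump vmalloc space safely.
 * The names below are made up for the example.
 *
 *	char *kbuf = kmalloc(len, GFP_KERNEL);
 *	long copied;
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	copied = vread(kbuf, vmalloc_area_addr, len);
 */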
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *	@returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma.  Will return failure if
 *	that criterion isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
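
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the intended pairing is vmalloc_user() at allocation time (which zeroes
 * the memory and sets VM_USERMAP) and remap_vmalloc_range() from a
 * driver's ->mmap() handler.  The handler and buffer names below are
 * made up for the example.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo_buffer, vma->vm_pgoff);
 *	}
 */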
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
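
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * alloc_vm_area() suits callers, such as paravirtualized guests, that
 * want a kernel virtual range whose page tables are guaranteed to exist
 * but that will install the actual translations themselves later.
 *
 *	struct vm_struct *area = alloc_vm_area(PAGE_SIZE);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	... populate the mapping at area->addr by other means ...
 *	free_vm_area(area);
 */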