/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
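
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * pair get_vm_area() with map_vm_area(), i.e. the low-level pattern that
 * vmap() wraps.  It assumes the caller already owns 'nr' allocated pages;
 * 'example_map_pages' is a hypothetical name.
 */
#if 0	/* example only */
static void *example_map_pages(struct page **pages, unsigned int nr)
{
	struct vm_struct *area;
	struct page **tmp = pages;	/* map_vm_area() advances the cursor */

	area = get_vm_area(nr << PAGE_SHIFT, VM_MAP);
	if (!area)
		return NULL;
	if (map_vm_area(area, PAGE_KERNEL, &tmp)) {
		/* tears down any partial mappings and the vm_struct */
		vunmap(area->addr);
		return NULL;
	}
	return area->addr;
}
#endif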

static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
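
/*
 * Illustrative sketch (not part of the original file): a typical vmap()
 * consumer allocates scattered pages and maps them into one virtually
 * contiguous range.  'example_vmap_two_pages' is a hypothetical name.
 */
#if 0	/* example only */
static void *example_vmap_two_pages(void)
{
	struct page *pages[2];
	void *vaddr;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto fail;

	/* VM_MAP marks the area as a vmap()ed region */
	vaddr = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		goto fail;
	return vaddr;	/* later: vunmap(vaddr), then free the pages */

fail:
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);
	return NULL;
}
#endif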

void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
					PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over the page level allocator and protection
 *	flags use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
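
/*
 * Illustrative sketch (not part of the original file): the canonical
 * vmalloc()/vfree() pattern for a large, virtually contiguous buffer.
 * 'example_use_big_buffer' is a hypothetical name.
 */
#if 0	/* example only */
static int example_use_big_buffer(void)
{
	void *buf = vmalloc(1024 * 1024);	/* ~1 MB, beyond kmalloc()'s comfort zone */

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, 1024 * 1024);
	/* ... the pages behind buf need not be physically contiguous ... */
	vfree(buf);	/* must not be called from interrupt context */
	return 0;
}
#endif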

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
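
/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * to share a kernel buffer with userspace allocates it with vmalloc_user()
 * so the VM_USERMAP flag is set; see the remap_vmalloc_range() example
 * further below for the matching mmap handler.  'example_shared_buf' is a
 * hypothetical name.
 */
#if 0	/* example only */
static void *example_shared_buf;

static int example_alloc_shared(unsigned long size)
{
	example_shared_buf = vmalloc_user(size);	/* zeroed, VM_USERMAP set */
	return example_shared_buf ? 0 : -ENOMEM;
}
#endif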

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over the page level allocator and protection
 *	flags use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);
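
/*
 * Illustrative sketch (not part of the original file): per-node allocation
 * keeps a CPU's working data on local memory.  Assumes the cpu_to_node()
 * NUMA helper; 'example_alloc_local' is a hypothetical name.
 */
#if 0	/* example only */
static void *example_alloc_local(int cpu, unsigned long size)
{
	/* back the buffer with pages from the CPU's home node */
	return vmalloc_node(size, cpu_to_node(cpu));	/* freed with plain vfree() */
}
#endif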

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over the page level allocator and protection
 *	flags use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
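
/*
 * Illustrative sketch (not part of the original file): vmalloc_exec() is
 * what a loader-style user (module code being the classic case) would
 * reach for when the allocation must be executable.  'example_load_code'
 * is a hypothetical name.
 */
#if 0	/* example only */
static void *example_load_code(const void *image, unsigned long len)
{
	void *code = vmalloc_exec(len);

	if (!code)
		return NULL;
	memcpy(code, image, len);	/* caller handles any icache flushing */
	return code;
}
#endif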

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit physically addressable pages to cover @size
 *	from the page level allocator and map them into contiguous kernel
 *	virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
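
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * device can only address 32-bit physical memory allocates its staging
 * buffer with vmalloc_32().  Note the pages are only virtually contiguous,
 * so this suits PIO-style access, not a single-segment DMA mapping.
 * 'example_alloc_32bit_staging' is a hypothetical name.
 */
#if 0	/* example only */
static void *example_alloc_32bit_staging(unsigned long size)
{
	void *buf = vmalloc_32(size);	/* pages come from a 32-bit zone */

	if (buf)
		memset(buf, 0, size);
	return buf;
}
#endif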

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
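
/*
 * Illustrative sketch (not part of the original file): vread()/vwrite()
 * copy through the vmlist with bounds checking, which is how consumers
 * such as /dev/kmem access vmalloc space safely.  'example_peek' is a
 * hypothetical helper.
 */
#if 0	/* example only */
static long example_peek(char *dst, void *vmalloc_addr, unsigned long len)
{
	/* returns the number of bytes placed in dst; unmapped gaps read as '\0' */
	return vread(dst, (char *)vmalloc_addr, len);
}
#endif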

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *	@returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	those criteria aren't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
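
/*
 * Illustrative sketch (not part of the original file): the matching mmap
 * handler for a buffer allocated with vmalloc_user() above.  Assumes the
 * usual file_operations hookup; 'example_shared_buf', 'example_mmap' and
 * 'example_fops' are hypothetical names.
 */
#if 0	/* example only */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* vmalloc_user() set VM_USERMAP, so this passes the flag check */
	return remap_vmalloc_range(vma, example_shared_buf, 0);
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.mmap	= example_mmap,
};
#endif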

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
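
/*
 * Illustrative sketch (not part of the original file): alloc_vm_area() is
 * aimed at hypervisor-style users that want an address range with live
 * page tables but no backing pages, to be populated by other means (e.g.
 * a grant-table style mapping).  'example_reserve_range' and the commented
 * hypercall are hypothetical.
 */
#if 0	/* example only */
static struct vm_struct *example_reserve_range(size_t size)
{
	struct vm_struct *area = alloc_vm_area(size);

	if (!area)
		return NULL;
	/* example_hypercall_map(area->addr, size); */
	return area;	/* released later with free_vm_area() */
}
#endif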

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);