xref: /openbmc/linux/mm/vmalloc.c (revision 52fd24ca1db3a741f144bbc229beefe044202cac)
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc_node(sizeof(*area), gfp_mask, node);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
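
/*
 * Illustrative sketch (not part of the original file): a caller that
 * wants to manage its own mapping can reserve a range with
 * get_vm_area() and populate it with map_vm_area().  Error handling
 * is abbreviated and the two alloc_page() calls are assumed to
 * succeed.  Note that map_vm_area() advances the page pointer it is
 * given, so a scratch cursor is passed:
 *
 *	struct vm_struct *area;
 *	struct page *pages[2], **cursor = pages;
 *
 *	pages[0] = alloc_page(GFP_KERNEL);
 *	pages[1] = alloc_page(GFP_KERNEL);
 *	area = get_vm_area(2 * PAGE_SIZE, VM_MAP);
 *	if (area && map_vm_area(area, PAGE_KERNEL, &cursor) == 0)
 *		memset(area->addr, 0, 2 * PAGE_SIZE);
 */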

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
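
/*
 * Illustrative sketch (not from the original file): map four freshly
 * allocated pages, use the virtually contiguous window, then tear the
 * mapping down.  vunmap() releases only the virtual mapping; the
 * pages themselves must still be freed by the caller.  Allocation
 * failure checks are omitted for brevity:
 *
 *	struct page *pages[4];
 *	void *va;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	va = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		memset(va, 0, 4 * PAGE_SIZE);
 *		vunmap(va);
 *	}
 *	for (i = 0; i < 4; i++)
 *		__free_page(pages[i]);
 */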

void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & ~(__GFP_HIGHMEM | __GFP_ZERO)),
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
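
/*
 * Illustrative sketch (the 64KB size is hypothetical): the common
 * pattern is a plain vmalloc()/vfree() pair.  Unlike kmalloc(), the
 * result is only virtually contiguous, so it is unsuitable for
 * devices that need physically contiguous buffers:
 *
 *	void *buf = vmalloc(64 * 1024);
 *
 *	if (buf) {
 *		memset(buf, 0, 64 * 1024);
 *		vfree(buf);
 *	}
 */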

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
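
/*
 * Illustrative note (not part of the original file): a buffer that
 * will later be handed to remap_vmalloc_range() must come from
 * vmalloc_user(), since that is what sets VM_USERMAP; see the sketch
 * after remap_vmalloc_range() below for the matching mmap handler.
 * The size here is hypothetical:
 *
 *	void *shared = vmalloc_user(16 * PAGE_SIZE);
 */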

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);
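
/*
 * Illustrative sketch (the pernode_buf array and struct my_stats are
 * hypothetical): per-node allocation is typically driven by a loop
 * over the online nodes:
 *
 *	int nid;
 *
 *	for_each_online_node(nid)
 *		pernode_buf[nid] = vmalloc_node(sizeof(struct my_stats), nid);
 */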

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32-bit physically addressable pages to cover @size
 *	from the page level allocator and map them into contiguous kernel
 *	virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
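
/*
 * Illustrative note (not part of the original file): vread() backs
 * the /dev/kmem read path for addresses in the vmalloc region.  A
 * caller stages the copy through a kernel buffer, since bytes in the
 * gaps before a vm area are filled with zeroes rather than faulting;
 * len and vmalloc_addr are hypothetical:
 *
 *	char *kbuf = kmalloc(len, GFP_KERNEL);
 *
 *	if (kbuf) {
 *		vread(kbuf, (char *)vmalloc_addr, len);
 *		kfree(kbuf);
 *	}
 */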

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *	@returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma.  Will return failure if
 *	those criteria aren't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;
}
EXPORT_SYMBOL(remap_vmalloc_range);
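
/*
 * Illustrative sketch (struct my_dev, its vbuf field and mydrv_mmap
 * are hypothetical): the intended pairing is vmalloc_user() for the
 * buffer and remap_vmalloc_range() from a driver's ->mmap() handler,
 * passing the file offset through as the page offset:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->vbuf, vma->vm_pgoff);
 *	}
 */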