--- vmalloc.c	(5a84d159061d914c8dd4aa372ac6e9529c2be453)
+++ vmalloc.c	(0d08e0d3a97cce22ebf80b54785e00d9b94e1add)
@@ -1,8 +1,8 @@
 /*
  * linux/mm/vmalloc.c
  *
  * Copyright (C) 1993 Linus Torvalds
  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
  * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
  * Numa awareness, Christoph Lameter, SGI, June 2005

[... 417 unchanged lines hidden ...]

@@ -426,17 +426,17 @@
 
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & ~(__GFP_HIGHMEM | __GFP_ZERO)),
+				(gfp_mask & GFP_LEVEL_MASK),
 				node);
 	}
 	area->pages = pages;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
 		kfree(area);
 		return NULL;
 	}

[... 129 unchanged lines hidden ...]

@@ -572,42 +572,50 @@
  * use __vmalloc() instead.
  */
 
 void *vmalloc_exec(unsigned long size)
 {
 	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
 }
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
+#define GFP_VMALLOC32 GFP_DMA32
+#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
+#define GFP_VMALLOC32 GFP_DMA
+#else
+#define GFP_VMALLOC32 GFP_KERNEL
+#endif
+
 /**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
  * @size: allocation size
  *
  * Allocate enough 32bit PA addressable pages to cover @size from the
  * page level allocator and map them into contiguous kernel virtual space.
  */
 void *vmalloc_32(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc_32);
 
 /**
  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
  * @size: allocation size
  *
  * The resulting memory area is 32bit addressable and zeroed so it can be
  * mapped to userspace without leaking data.
  */
 void *vmalloc_32_user(unsigned long size)
 {
 	struct vm_struct *area;
 	void *ret;
 
-	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
 	if (ret) {
 		write_lock(&vmlist_lock);
 		area = __find_vm_area(ret);
 		area->flags |= VM_USERMAP;
 		write_unlock(&vmlist_lock);
 	}
 	return ret;
 }

[... 136 unchanged lines hidden ...]
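
Note: the core of the change is the GFP_VMALLOC32 block. On a 64-bit kernel, plain GFP_KERNEL can return pages above 4 GiB, so vmalloc_32() and vmalloc_32_user() now select a zone modifier that actually guarantees 32-bit physical addresses: ZONE_DMA32 where it exists, ZONE_DMA as a fallback, and plain GFP_KERNEL on 32-bit builds where lowmem already qualifies. The sketch below re-applies the same compile-time selection in a hypothetical driver helper; MY_GFP_32BIT and my_alloc_scratch() are illustrative names, not kernel APIs, and the three-argument __vmalloc() matches the interface as of this patch.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Pick the most specific below-4GiB zone the configuration offers. */
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define MY_GFP_32BIT	GFP_DMA32	/* dedicated below-4GiB zone on 64-bit */
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define MY_GFP_32BIT	GFP_DMA		/* smaller legacy DMA zone, still below 4 GiB */
#else
#define MY_GFP_32BIT	GFP_KERNEL	/* 32-bit kernel: lowmem is already 32-bit addressable */
#endif

/* Hypothetical helper: virtually contiguous scratch space backed by 32-bit pages. */
static void *my_alloc_scratch(unsigned long size)
{
	return __vmalloc(size, MY_GFP_32BIT, PAGE_KERNEL);
}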
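
Note: as context for why vmalloc_32_user() tags the area with VM_USERMAP, here is a minimal, hypothetical sketch of the usage pattern the function exists for: a driver allocates a zeroed, 32-bit addressable buffer and exposes it to userspace from its mmap handler via remap_vmalloc_range(), which refuses areas that lack VM_USERMAP. The names my_buf, MY_BUF_SIZE, my_buf_alloc(), my_dev_mmap() and my_buf_free() are illustrative only and not part of the patch.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define MY_BUF_SIZE	(256 * 1024)	/* hypothetical buffer size */

static void *my_buf;			/* hypothetical per-device buffer */

/* Allocate the shared buffer once, e.g. at open() or probe() time. */
static int my_buf_alloc(void)
{
	my_buf = vmalloc_32_user(MY_BUF_SIZE);	/* zeroed, 32-bit addressable, VM_USERMAP set */
	return my_buf ? 0 : -ENOMEM;
}

/* mmap handler: map the vmalloc'ed buffer into the calling process. */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* remap_vmalloc_range() checks VM_USERMAP and the requested size/offset. */
	return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}

/* Release the buffer when the device goes away. */
static void my_buf_free(void)
{
	vfree(my_buf);
	my_buf = NULL;
}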