xref: /openbmc/linux/mm/hugetlb.c (revision b76c8cfb)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Generic hugetlb support.
31da177e4SLinus Torvalds  * (C) William Irwin, April 2004
41da177e4SLinus Torvalds  */
51da177e4SLinus Torvalds #include <linux/gfp.h>
61da177e4SLinus Torvalds #include <linux/list.h>
71da177e4SLinus Torvalds #include <linux/init.h>
81da177e4SLinus Torvalds #include <linux/module.h>
91da177e4SLinus Torvalds #include <linux/mm.h>
10e1759c21SAlexey Dobriyan #include <linux/seq_file.h>
111da177e4SLinus Torvalds #include <linux/sysctl.h>
121da177e4SLinus Torvalds #include <linux/highmem.h>
13cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h>
141da177e4SLinus Torvalds #include <linux/nodemask.h>
1563551ae0SDavid Gibson #include <linux/pagemap.h>
165da7ca86SChristoph Lameter #include <linux/mempolicy.h>
17aea47ff3SChristoph Lameter #include <linux/cpuset.h>
183935baa9SDavid Gibson #include <linux/mutex.h>
19aa888a74SAndi Kleen #include <linux/bootmem.h>
20a3437870SNishanth Aravamudan #include <linux/sysfs.h>
21d6606683SLinus Torvalds 
2263551ae0SDavid Gibson #include <asm/page.h>
2363551ae0SDavid Gibson #include <asm/pgtable.h>
2478a34ae2SAdrian Bunk #include <asm/io.h>
2563551ae0SDavid Gibson 
2663551ae0SDavid Gibson #include <linux/hugetlb.h>
279a305230SLee Schermerhorn #include <linux/node.h>
287835e98bSNick Piggin #include "internal.h"
291da177e4SLinus Torvalds 
301da177e4SLinus Torvalds const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
31396faf03SMel Gorman static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
32396faf03SMel Gorman unsigned long hugepages_treat_as_movable;
33a5516438SAndi Kleen 
34e5ff2159SAndi Kleen static int max_hstate;
35e5ff2159SAndi Kleen unsigned int default_hstate_idx;
36e5ff2159SAndi Kleen struct hstate hstates[HUGE_MAX_HSTATE];
37e5ff2159SAndi Kleen 
3853ba51d2SJon Tollefson __initdata LIST_HEAD(huge_boot_pages);
3953ba51d2SJon Tollefson 
40e5ff2159SAndi Kleen /* for command line parsing */
41e5ff2159SAndi Kleen static struct hstate * __initdata parsed_hstate;
42e5ff2159SAndi Kleen static unsigned long __initdata default_hstate_max_huge_pages;
43e11bfbfcSNick Piggin static unsigned long __initdata default_hstate_size;
44e5ff2159SAndi Kleen 
45e5ff2159SAndi Kleen #define for_each_hstate(h) \
46e5ff2159SAndi Kleen 	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
47396faf03SMel Gorman 
483935baa9SDavid Gibson /*
493935baa9SDavid Gibson  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
503935baa9SDavid Gibson  */
513935baa9SDavid Gibson static DEFINE_SPINLOCK(hugetlb_lock);
520bd0f9fbSEric Paris 
53e7c4b0bfSAndy Whitcroft /*
5496822904SAndy Whitcroft  * Region tracking -- allows tracking of reservations and instantiated pages
5596822904SAndy Whitcroft  *                    across the pages in a mapping.
5684afd99bSAndy Whitcroft  *
5784afd99bSAndy Whitcroft  * The region data structures are protected by a combination of the mmap_sem
5884afd99bSAndy Whitcroft  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
5984afd99bSAndy Whitcroft  * must either hold the mmap_sem for write, or the mmap_sem for read and
6084afd99bSAndy Whitcroft  * the hugetlb_instantiation mutex:
6184afd99bSAndy Whitcroft  *
6284afd99bSAndy Whitcroft  * 	down_write(&mm->mmap_sem);
6384afd99bSAndy Whitcroft  * or
6484afd99bSAndy Whitcroft  * 	down_read(&mm->mmap_sem);
6584afd99bSAndy Whitcroft  * 	mutex_lock(&hugetlb_instantiation_mutex);
6696822904SAndy Whitcroft  */
6796822904SAndy Whitcroft struct file_region {
6896822904SAndy Whitcroft 	struct list_head link;
6996822904SAndy Whitcroft 	long from;
7096822904SAndy Whitcroft 	long to;
7196822904SAndy Whitcroft };
7296822904SAndy Whitcroft 
7396822904SAndy Whitcroft static long region_add(struct list_head *head, long f, long t)
7496822904SAndy Whitcroft {
7596822904SAndy Whitcroft 	struct file_region *rg, *nrg, *trg;
7696822904SAndy Whitcroft 
7796822904SAndy Whitcroft 	/* Locate the region we are either in or before. */
7896822904SAndy Whitcroft 	list_for_each_entry(rg, head, link)
7996822904SAndy Whitcroft 		if (f <= rg->to)
8096822904SAndy Whitcroft 			break;
8196822904SAndy Whitcroft 
8296822904SAndy Whitcroft 	/* Round our left edge to the current segment if it encloses us. */
8396822904SAndy Whitcroft 	if (f > rg->from)
8496822904SAndy Whitcroft 		f = rg->from;
8596822904SAndy Whitcroft 
8696822904SAndy Whitcroft 	/* Check for and consume any regions we now overlap with. */
8796822904SAndy Whitcroft 	nrg = rg;
8896822904SAndy Whitcroft 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
8996822904SAndy Whitcroft 		if (&rg->link == head)
9096822904SAndy Whitcroft 			break;
9196822904SAndy Whitcroft 		if (rg->from > t)
9296822904SAndy Whitcroft 			break;
9396822904SAndy Whitcroft 
9496822904SAndy Whitcroft 		/* If this area reaches higher then extend our area to
9596822904SAndy Whitcroft 		 * include it completely.  If this is not the first area
9696822904SAndy Whitcroft 		 * which we intend to reuse, free it. */
9796822904SAndy Whitcroft 		if (rg->to > t)
9896822904SAndy Whitcroft 			t = rg->to;
9996822904SAndy Whitcroft 		if (rg != nrg) {
10096822904SAndy Whitcroft 			list_del(&rg->link);
10196822904SAndy Whitcroft 			kfree(rg);
10296822904SAndy Whitcroft 		}
10396822904SAndy Whitcroft 	}
10496822904SAndy Whitcroft 	nrg->from = f;
10596822904SAndy Whitcroft 	nrg->to = t;
10696822904SAndy Whitcroft 	return 0;
10796822904SAndy Whitcroft }
10896822904SAndy Whitcroft 
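/*
 * Example (illustrative, not part of the original source): if the list
 * holds [1,3) and [4,7), then region_add(head, 2, 5) rounds its left edge
 * down to 1, absorbs the overlapping [4,7) entry (extending t to 7 and
 * freeing that entry) and leaves the single region [1,7).  region_add()
 * never allocates memory and always returns 0; a preceding region_chg()
 * call is expected to have inserted any placeholder entry that is needed.
 */
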
10996822904SAndy Whitcroft static long region_chg(struct list_head *head, long f, long t)
11096822904SAndy Whitcroft {
11196822904SAndy Whitcroft 	struct file_region *rg, *nrg;
11296822904SAndy Whitcroft 	long chg = 0;
11396822904SAndy Whitcroft 
11496822904SAndy Whitcroft 	/* Locate the region we are before or in. */
11596822904SAndy Whitcroft 	list_for_each_entry(rg, head, link)
11696822904SAndy Whitcroft 		if (f <= rg->to)
11796822904SAndy Whitcroft 			break;
11896822904SAndy Whitcroft 
11996822904SAndy Whitcroft 	/* If we are below the current region then a new region is required.
12096822904SAndy Whitcroft 	 * Subtle: allocate a new region at the position but make it zero
12196822904SAndy Whitcroft 	 * size such that we can guarantee to record the reservation. */
12296822904SAndy Whitcroft 	if (&rg->link == head || t < rg->from) {
12396822904SAndy Whitcroft 		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
12496822904SAndy Whitcroft 		if (!nrg)
12596822904SAndy Whitcroft 			return -ENOMEM;
12696822904SAndy Whitcroft 		nrg->from = f;
12796822904SAndy Whitcroft 		nrg->to   = f;
12896822904SAndy Whitcroft 		INIT_LIST_HEAD(&nrg->link);
12996822904SAndy Whitcroft 		list_add(&nrg->link, rg->link.prev);
13096822904SAndy Whitcroft 
13196822904SAndy Whitcroft 		return t - f;
13296822904SAndy Whitcroft 	}
13396822904SAndy Whitcroft 
13496822904SAndy Whitcroft 	/* Round our left edge to the current segment if it encloses us. */
13596822904SAndy Whitcroft 	if (f > rg->from)
13696822904SAndy Whitcroft 		f = rg->from;
13796822904SAndy Whitcroft 	chg = t - f;
13896822904SAndy Whitcroft 
13996822904SAndy Whitcroft 	/* Check for and consume any regions we now overlap with. */
14096822904SAndy Whitcroft 	list_for_each_entry(rg, rg->link.prev, link) {
14196822904SAndy Whitcroft 		if (&rg->link == head)
14296822904SAndy Whitcroft 			break;
14396822904SAndy Whitcroft 		if (rg->from > t)
14496822904SAndy Whitcroft 			return chg;
14596822904SAndy Whitcroft 
14696822904SAndy Whitcroft 		/* We overlap with this area; if it extends further than
14796822904SAndy Whitcroft 		 * us then we must extend ourselves.  Account for its
14896822904SAndy Whitcroft 		 * existing reservation. */
14996822904SAndy Whitcroft 		if (rg->to > t) {
15096822904SAndy Whitcroft 			chg += rg->to - t;
15196822904SAndy Whitcroft 			t = rg->to;
15296822904SAndy Whitcroft 		}
15396822904SAndy Whitcroft 		chg -= rg->to - rg->from;
15496822904SAndy Whitcroft 	}
15596822904SAndy Whitcroft 	return chg;
15696822904SAndy Whitcroft }
15796822904SAndy Whitcroft 
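/*
 * Example (illustrative, not part of the original source): with existing
 * regions [0,2) and [5,8), a caller reserving the range [1,6) would do:
 *
 *	chg = region_chg(head, 1, 6);	returns 3: pages 2, 3 and 4 are new
 *	...charge quota / reserve 'chg' huge pages...
 *	region_add(head, 1, 6);		commit; the list becomes [0,8)
 *
 * If the prepared reservation is never consumed, the caller simply does
 * not call region_add(); any zero-size placeholder left behind records no
 * pages and is harmless.
 */
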
15896822904SAndy Whitcroft static long region_truncate(struct list_head *head, long end)
15996822904SAndy Whitcroft {
16096822904SAndy Whitcroft 	struct file_region *rg, *trg;
16196822904SAndy Whitcroft 	long chg = 0;
16296822904SAndy Whitcroft 
16396822904SAndy Whitcroft 	/* Locate the region we are either in or before. */
16496822904SAndy Whitcroft 	list_for_each_entry(rg, head, link)
16596822904SAndy Whitcroft 		if (end <= rg->to)
16696822904SAndy Whitcroft 			break;
16796822904SAndy Whitcroft 	if (&rg->link == head)
16896822904SAndy Whitcroft 		return 0;
16996822904SAndy Whitcroft 
17096822904SAndy Whitcroft 	/* If we are in the middle of a region then adjust it. */
17196822904SAndy Whitcroft 	if (end > rg->from) {
17296822904SAndy Whitcroft 		chg = rg->to - end;
17396822904SAndy Whitcroft 		rg->to = end;
17496822904SAndy Whitcroft 		rg = list_entry(rg->link.next, typeof(*rg), link);
17596822904SAndy Whitcroft 	}
17696822904SAndy Whitcroft 
17796822904SAndy Whitcroft 	/* Drop any remaining regions. */
17896822904SAndy Whitcroft 	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
17996822904SAndy Whitcroft 		if (&rg->link == head)
18096822904SAndy Whitcroft 			break;
18196822904SAndy Whitcroft 		chg += rg->to - rg->from;
18296822904SAndy Whitcroft 		list_del(&rg->link);
18396822904SAndy Whitcroft 		kfree(rg);
18496822904SAndy Whitcroft 	}
18596822904SAndy Whitcroft 	return chg;
18696822904SAndy Whitcroft }
18796822904SAndy Whitcroft 
18884afd99bSAndy Whitcroft static long region_count(struct list_head *head, long f, long t)
18984afd99bSAndy Whitcroft {
19084afd99bSAndy Whitcroft 	struct file_region *rg;
19184afd99bSAndy Whitcroft 	long chg = 0;
19284afd99bSAndy Whitcroft 
19384afd99bSAndy Whitcroft 	/* Locate each segment we overlap with, and count that overlap. */
19484afd99bSAndy Whitcroft 	list_for_each_entry(rg, head, link) {
19584afd99bSAndy Whitcroft 		int seg_from;
19684afd99bSAndy Whitcroft 		int seg_to;
19784afd99bSAndy Whitcroft 
19884afd99bSAndy Whitcroft 		if (rg->to <= f)
19984afd99bSAndy Whitcroft 			continue;
20084afd99bSAndy Whitcroft 		if (rg->from >= t)
20184afd99bSAndy Whitcroft 			break;
20284afd99bSAndy Whitcroft 
20384afd99bSAndy Whitcroft 		seg_from = max(rg->from, f);
20484afd99bSAndy Whitcroft 		seg_to = min(rg->to, t);
20584afd99bSAndy Whitcroft 
20684afd99bSAndy Whitcroft 		chg += seg_to - seg_from;
20784afd99bSAndy Whitcroft 	}
20884afd99bSAndy Whitcroft 
20984afd99bSAndy Whitcroft 	return chg;
21084afd99bSAndy Whitcroft }
21184afd99bSAndy Whitcroft 
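/*
 * Example (illustrative): with regions [0,4) and [6,10) on the list,
 * region_count(head, 2, 8) counts the overlap (4 - 2) + (8 - 6) = 4 pages.
 */
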
21296822904SAndy Whitcroft /*
213e7c4b0bfSAndy Whitcroft  * Convert the address within this vma to the page offset within
214e7c4b0bfSAndy Whitcroft  * the mapping, in pagecache page units; huge pages here.
215e7c4b0bfSAndy Whitcroft  */
216a5516438SAndi Kleen static pgoff_t vma_hugecache_offset(struct hstate *h,
217a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long address)
218e7c4b0bfSAndy Whitcroft {
219a5516438SAndi Kleen 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
220a5516438SAndi Kleen 			(vma->vm_pgoff >> huge_page_order(h));
221e7c4b0bfSAndy Whitcroft }
222e7c4b0bfSAndy Whitcroft 
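/*
 * Example (illustrative, assuming 2 MB huge pages on a 4 KB base page
 * size, i.e. huge_page_shift(h) == 21 and huge_page_order(h) == 9): for a
 * VMA with vm_pgoff == 512 (the second huge page of the file) and a fault
 * address 4 MB past vm_start, the result is (4 MB >> 21) + (512 >> 9) =
 * 2 + 1 = 3, i.e. the fourth huge page of the mapping's backing file.
 */
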
22384afd99bSAndy Whitcroft /*
22408fba699SMel Gorman  * Return the size of the pages allocated when backing a VMA. In the majority
22508fba699SMel Gorman  * of cases this will be the same size as that used by the page table entries.
22608fba699SMel Gorman  */
22708fba699SMel Gorman unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
22808fba699SMel Gorman {
22908fba699SMel Gorman 	struct hstate *hstate;
23008fba699SMel Gorman 
23108fba699SMel Gorman 	if (!is_vm_hugetlb_page(vma))
23208fba699SMel Gorman 		return PAGE_SIZE;
23308fba699SMel Gorman 
23408fba699SMel Gorman 	hstate = hstate_vma(vma);
23508fba699SMel Gorman 
23608fba699SMel Gorman 	return 1UL << (hstate->order + PAGE_SHIFT);
23708fba699SMel Gorman }
238f340ca0fSJoerg Roedel EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
23908fba699SMel Gorman 
24008fba699SMel Gorman /*
2413340289dSMel Gorman  * Return the page size being used by the MMU to back a VMA. In the majority
2423340289dSMel Gorman  * of cases, the page size used by the kernel matches the MMU size. On
2433340289dSMel Gorman  * architectures where it differs, an architecture-specific version of this
2443340289dSMel Gorman  * function is required.
2453340289dSMel Gorman  */
2463340289dSMel Gorman #ifndef vma_mmu_pagesize
2473340289dSMel Gorman unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
2483340289dSMel Gorman {
2493340289dSMel Gorman 	return vma_kernel_pagesize(vma);
2503340289dSMel Gorman }
2513340289dSMel Gorman #endif
2523340289dSMel Gorman 
2533340289dSMel Gorman /*
25484afd99bSAndy Whitcroft  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
25584afd99bSAndy Whitcroft  * bits of the reservation map pointer, which are always clear due to
25684afd99bSAndy Whitcroft  * alignment.
25784afd99bSAndy Whitcroft  */
25884afd99bSAndy Whitcroft #define HPAGE_RESV_OWNER    (1UL << 0)
25984afd99bSAndy Whitcroft #define HPAGE_RESV_UNMAPPED (1UL << 1)
26004f2cbe3SMel Gorman #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
26184afd99bSAndy Whitcroft 
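/*
 * Illustrative sketch (not part of the original source): the resv_map
 * pointer stored in vm_private_data comes from kmalloc() and is therefore
 * at least word aligned, so bits 0 and 1 are guaranteed clear and can
 * carry the two flags above.  Unpacking looks like:
 *
 *	map   = (struct resv_map *)(get_vma_private_data(vma) &
 *							~HPAGE_RESV_MASK);
 *	owner = get_vma_private_data(vma) & HPAGE_RESV_OWNER;
 *
 * which is what vma_resv_map() and is_vma_resv_set() below do.
 */
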
262a1e78772SMel Gorman /*
263a1e78772SMel Gorman  * These helpers are used to track how many pages are reserved for
264a1e78772SMel Gorman  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
265a1e78772SMel Gorman  * is guaranteed to have its future faults succeed.
266a1e78772SMel Gorman  *
267a1e78772SMel Gorman  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
268a1e78772SMel Gorman  * the reserve counters are updated with the hugetlb_lock held. It is safe
269a1e78772SMel Gorman  * to reset the VMA at fork() time as it is not in use yet and there is no
270a1e78772SMel Gorman  * chance of the global counters getting corrupted as a result of the values.
27184afd99bSAndy Whitcroft  *
27284afd99bSAndy Whitcroft  * The private mapping reservation is represented in a subtly different
27384afd99bSAndy Whitcroft  * manner to a shared mapping.  A shared mapping has a region map associated
27484afd99bSAndy Whitcroft  * with the underlying file; this region map represents the backing file
27584afd99bSAndy Whitcroft  * pages which have ever had a reservation assigned, and this persists even
27684afd99bSAndy Whitcroft  * after the page is instantiated.  A private mapping has a region map
27784afd99bSAndy Whitcroft  * associated with the original mmap which is attached to all VMAs that
27884afd99bSAndy Whitcroft  * reference it; this region map represents those offsets which have consumed
27984afd99bSAndy Whitcroft  * a reservation, i.e. where pages have been instantiated.
280a1e78772SMel Gorman  */
281e7c4b0bfSAndy Whitcroft static unsigned long get_vma_private_data(struct vm_area_struct *vma)
282e7c4b0bfSAndy Whitcroft {
283e7c4b0bfSAndy Whitcroft 	return (unsigned long)vma->vm_private_data;
284e7c4b0bfSAndy Whitcroft }
285e7c4b0bfSAndy Whitcroft 
286e7c4b0bfSAndy Whitcroft static void set_vma_private_data(struct vm_area_struct *vma,
287e7c4b0bfSAndy Whitcroft 							unsigned long value)
288e7c4b0bfSAndy Whitcroft {
289e7c4b0bfSAndy Whitcroft 	vma->vm_private_data = (void *)value;
290e7c4b0bfSAndy Whitcroft }
291e7c4b0bfSAndy Whitcroft 
29284afd99bSAndy Whitcroft struct resv_map {
29384afd99bSAndy Whitcroft 	struct kref refs;
29484afd99bSAndy Whitcroft 	struct list_head regions;
29584afd99bSAndy Whitcroft };
29684afd99bSAndy Whitcroft 
2972a4b3dedSHarvey Harrison static struct resv_map *resv_map_alloc(void)
29884afd99bSAndy Whitcroft {
29984afd99bSAndy Whitcroft 	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
30084afd99bSAndy Whitcroft 	if (!resv_map)
30184afd99bSAndy Whitcroft 		return NULL;
30284afd99bSAndy Whitcroft 
30384afd99bSAndy Whitcroft 	kref_init(&resv_map->refs);
30484afd99bSAndy Whitcroft 	INIT_LIST_HEAD(&resv_map->regions);
30584afd99bSAndy Whitcroft 
30684afd99bSAndy Whitcroft 	return resv_map;
30784afd99bSAndy Whitcroft }
30884afd99bSAndy Whitcroft 
3092a4b3dedSHarvey Harrison static void resv_map_release(struct kref *ref)
31084afd99bSAndy Whitcroft {
31184afd99bSAndy Whitcroft 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
31284afd99bSAndy Whitcroft 
31384afd99bSAndy Whitcroft 	/* Clear out any active regions before we release the map. */
31484afd99bSAndy Whitcroft 	region_truncate(&resv_map->regions, 0);
31584afd99bSAndy Whitcroft 	kfree(resv_map);
31684afd99bSAndy Whitcroft }
31784afd99bSAndy Whitcroft 
31884afd99bSAndy Whitcroft static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
319a1e78772SMel Gorman {
320a1e78772SMel Gorman 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
321f83a275dSMel Gorman 	if (!(vma->vm_flags & VM_MAYSHARE))
32284afd99bSAndy Whitcroft 		return (struct resv_map *)(get_vma_private_data(vma) &
32384afd99bSAndy Whitcroft 							~HPAGE_RESV_MASK);
3242a4b3dedSHarvey Harrison 	return NULL;
325a1e78772SMel Gorman }
326a1e78772SMel Gorman 
32784afd99bSAndy Whitcroft static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
328a1e78772SMel Gorman {
329a1e78772SMel Gorman 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
330f83a275dSMel Gorman 	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
331a1e78772SMel Gorman 
33284afd99bSAndy Whitcroft 	set_vma_private_data(vma, (get_vma_private_data(vma) &
33384afd99bSAndy Whitcroft 				HPAGE_RESV_MASK) | (unsigned long)map);
33404f2cbe3SMel Gorman }
33504f2cbe3SMel Gorman 
33604f2cbe3SMel Gorman static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
33704f2cbe3SMel Gorman {
33804f2cbe3SMel Gorman 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
339f83a275dSMel Gorman 	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
340e7c4b0bfSAndy Whitcroft 
341e7c4b0bfSAndy Whitcroft 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
34204f2cbe3SMel Gorman }
34304f2cbe3SMel Gorman 
34404f2cbe3SMel Gorman static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
34504f2cbe3SMel Gorman {
34604f2cbe3SMel Gorman 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
347e7c4b0bfSAndy Whitcroft 
348e7c4b0bfSAndy Whitcroft 	return (get_vma_private_data(vma) & flag) != 0;
349a1e78772SMel Gorman }
350a1e78772SMel Gorman 
351a1e78772SMel Gorman /* Decrement the reserved pages in the hugepage pool by one */
352a5516438SAndi Kleen static void decrement_hugepage_resv_vma(struct hstate *h,
353a5516438SAndi Kleen 			struct vm_area_struct *vma)
354a1e78772SMel Gorman {
355c37f9fb1SAndy Whitcroft 	if (vma->vm_flags & VM_NORESERVE)
356c37f9fb1SAndy Whitcroft 		return;
357c37f9fb1SAndy Whitcroft 
358f83a275dSMel Gorman 	if (vma->vm_flags & VM_MAYSHARE) {
359a1e78772SMel Gorman 		/* Shared mappings always use reserves */
360a5516438SAndi Kleen 		h->resv_huge_pages--;
36184afd99bSAndy Whitcroft 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
362a1e78772SMel Gorman 		/*
363a1e78772SMel Gorman 		 * Only the process that called mmap() has reserves for
364a1e78772SMel Gorman 		 * private mappings.
365a1e78772SMel Gorman 		 */
366a5516438SAndi Kleen 		h->resv_huge_pages--;
367a1e78772SMel Gorman 	}
368a1e78772SMel Gorman }
369a1e78772SMel Gorman 
37004f2cbe3SMel Gorman /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
371a1e78772SMel Gorman void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
372a1e78772SMel Gorman {
373a1e78772SMel Gorman 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
374f83a275dSMel Gorman 	if (!(vma->vm_flags & VM_MAYSHARE))
375a1e78772SMel Gorman 		vma->vm_private_data = (void *)0;
376a1e78772SMel Gorman }
377a1e78772SMel Gorman 
378a1e78772SMel Gorman /* Returns true if the VMA has associated reserve pages */
3797f09ca51SMel Gorman static int vma_has_reserves(struct vm_area_struct *vma)
380a1e78772SMel Gorman {
381f83a275dSMel Gorman 	if (vma->vm_flags & VM_MAYSHARE)
382a1e78772SMel Gorman 		return 1;
3837f09ca51SMel Gorman 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3847f09ca51SMel Gorman 		return 1;
3857f09ca51SMel Gorman 	return 0;
386a1e78772SMel Gorman }
387a1e78772SMel Gorman 
38869d177c2SAndy Whitcroft static void clear_gigantic_page(struct page *page,
38969d177c2SAndy Whitcroft 			unsigned long addr, unsigned long sz)
39069d177c2SAndy Whitcroft {
39169d177c2SAndy Whitcroft 	int i;
39269d177c2SAndy Whitcroft 	struct page *p = page;
39369d177c2SAndy Whitcroft 
39469d177c2SAndy Whitcroft 	might_sleep();
39569d177c2SAndy Whitcroft 	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
39669d177c2SAndy Whitcroft 		cond_resched();
39769d177c2SAndy Whitcroft 		clear_user_highpage(p, addr + i * PAGE_SIZE);
39869d177c2SAndy Whitcroft 	}
39969d177c2SAndy Whitcroft }
400a5516438SAndi Kleen static void clear_huge_page(struct page *page,
401a5516438SAndi Kleen 			unsigned long addr, unsigned long sz)
40279ac6ba4SDavid Gibson {
40379ac6ba4SDavid Gibson 	int i;
40479ac6ba4SDavid Gibson 
405ebdd4aeaSHannes Eder 	if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
406ebdd4aeaSHannes Eder 		clear_gigantic_page(page, addr, sz);
407ebdd4aeaSHannes Eder 		return;
408ebdd4aeaSHannes Eder 	}
40969d177c2SAndy Whitcroft 
41079ac6ba4SDavid Gibson 	might_sleep();
411a5516438SAndi Kleen 	for (i = 0; i < sz/PAGE_SIZE; i++) {
41279ac6ba4SDavid Gibson 		cond_resched();
413281e0e3bSRalf Baechle 		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
41479ac6ba4SDavid Gibson 	}
41579ac6ba4SDavid Gibson }
41679ac6ba4SDavid Gibson 
41769d177c2SAndy Whitcroft static void copy_gigantic_page(struct page *dst, struct page *src,
41869d177c2SAndy Whitcroft 			   unsigned long addr, struct vm_area_struct *vma)
41969d177c2SAndy Whitcroft {
42069d177c2SAndy Whitcroft 	int i;
42169d177c2SAndy Whitcroft 	struct hstate *h = hstate_vma(vma);
42269d177c2SAndy Whitcroft 	struct page *dst_base = dst;
42369d177c2SAndy Whitcroft 	struct page *src_base = src;
42469d177c2SAndy Whitcroft 	might_sleep();
42569d177c2SAndy Whitcroft 	for (i = 0; i < pages_per_huge_page(h); ) {
42669d177c2SAndy Whitcroft 		cond_resched();
42769d177c2SAndy Whitcroft 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
42869d177c2SAndy Whitcroft 
42969d177c2SAndy Whitcroft 		i++;
43069d177c2SAndy Whitcroft 		dst = mem_map_next(dst, dst_base, i);
43169d177c2SAndy Whitcroft 		src = mem_map_next(src, src_base, i);
43269d177c2SAndy Whitcroft 	}
43369d177c2SAndy Whitcroft }
43479ac6ba4SDavid Gibson static void copy_huge_page(struct page *dst, struct page *src,
4359de455b2SAtsushi Nemoto 			   unsigned long addr, struct vm_area_struct *vma)
43679ac6ba4SDavid Gibson {
43779ac6ba4SDavid Gibson 	int i;
438a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
43979ac6ba4SDavid Gibson 
440ebdd4aeaSHannes Eder 	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
441ebdd4aeaSHannes Eder 		copy_gigantic_page(dst, src, addr, vma);
442ebdd4aeaSHannes Eder 		return;
443ebdd4aeaSHannes Eder 	}
44469d177c2SAndy Whitcroft 
44579ac6ba4SDavid Gibson 	might_sleep();
446a5516438SAndi Kleen 	for (i = 0; i < pages_per_huge_page(h); i++) {
44779ac6ba4SDavid Gibson 		cond_resched();
4489de455b2SAtsushi Nemoto 		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
44979ac6ba4SDavid Gibson 	}
45079ac6ba4SDavid Gibson }
45179ac6ba4SDavid Gibson 
452a5516438SAndi Kleen static void enqueue_huge_page(struct hstate *h, struct page *page)
4531da177e4SLinus Torvalds {
4541da177e4SLinus Torvalds 	int nid = page_to_nid(page);
455a5516438SAndi Kleen 	list_add(&page->lru, &h->hugepage_freelists[nid]);
456a5516438SAndi Kleen 	h->free_huge_pages++;
457a5516438SAndi Kleen 	h->free_huge_pages_node[nid]++;
4581da177e4SLinus Torvalds }
4591da177e4SLinus Torvalds 
460a5516438SAndi Kleen static struct page *dequeue_huge_page_vma(struct hstate *h,
461a5516438SAndi Kleen 				struct vm_area_struct *vma,
46204f2cbe3SMel Gorman 				unsigned long address, int avoid_reserve)
4631da177e4SLinus Torvalds {
46431a5c6e4SNishanth Aravamudan 	int nid;
4651da177e4SLinus Torvalds 	struct page *page = NULL;
466480eccf9SLee Schermerhorn 	struct mempolicy *mpol;
46719770b32SMel Gorman 	nodemask_t *nodemask;
468396faf03SMel Gorman 	struct zonelist *zonelist = huge_zonelist(vma, address,
46919770b32SMel Gorman 					htlb_alloc_mask, &mpol, &nodemask);
470dd1a239fSMel Gorman 	struct zone *zone;
471dd1a239fSMel Gorman 	struct zoneref *z;
4721da177e4SLinus Torvalds 
473a1e78772SMel Gorman 	/*
474a1e78772SMel Gorman 	 * A child process with MAP_PRIVATE mappings created by its parent
475a1e78772SMel Gorman 	 * has no page reserves. This check ensures that reservations are
476a1e78772SMel Gorman 	 * not "stolen". The child may still get SIGKILLed.
477a1e78772SMel Gorman 	 */
4787f09ca51SMel Gorman 	if (!vma_has_reserves(vma) &&
479a5516438SAndi Kleen 			h->free_huge_pages - h->resv_huge_pages == 0)
480a1e78772SMel Gorman 		return NULL;
481a1e78772SMel Gorman 
48204f2cbe3SMel Gorman 	/* If reserves cannot be used, ensure enough pages are in the pool */
483a5516438SAndi Kleen 	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
48404f2cbe3SMel Gorman 		return NULL;
48504f2cbe3SMel Gorman 
48619770b32SMel Gorman 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
48719770b32SMel Gorman 						MAX_NR_ZONES - 1, nodemask) {
48854a6eb5cSMel Gorman 		nid = zone_to_nid(zone);
48954a6eb5cSMel Gorman 		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
490a5516438SAndi Kleen 		    !list_empty(&h->hugepage_freelists[nid])) {
491a5516438SAndi Kleen 			page = list_entry(h->hugepage_freelists[nid].next,
4921da177e4SLinus Torvalds 					  struct page, lru);
4931da177e4SLinus Torvalds 			list_del(&page->lru);
494a5516438SAndi Kleen 			h->free_huge_pages--;
495a5516438SAndi Kleen 			h->free_huge_pages_node[nid]--;
49604f2cbe3SMel Gorman 
49704f2cbe3SMel Gorman 			if (!avoid_reserve)
498a5516438SAndi Kleen 				decrement_hugepage_resv_vma(h, vma);
499a1e78772SMel Gorman 
5005ab3ee7bSKen Chen 			break;
5011da177e4SLinus Torvalds 		}
5023abf7afdSAndrew Morton 	}
50352cd3b07SLee Schermerhorn 	mpol_cond_put(mpol);
5041da177e4SLinus Torvalds 	return page;
5051da177e4SLinus Torvalds }
5061da177e4SLinus Torvalds 
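/*
 * Example (illustrative): if free_huge_pages == resv_huge_pages == 5, every
 * free page is already promised to an existing reservation.  A VMA without
 * reserves (or a caller passing avoid_reserve) therefore gets NULL here,
 * and the caller (e.g. alloc_huge_page()) falls back to
 * alloc_buddy_huge_page() rather than consuming a page that a reserving
 * mapping depends on.
 */
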
507a5516438SAndi Kleen static void update_and_free_page(struct hstate *h, struct page *page)
5086af2acb6SAdam Litke {
5096af2acb6SAdam Litke 	int i;
510a5516438SAndi Kleen 
51118229df5SAndy Whitcroft 	VM_BUG_ON(h->order >= MAX_ORDER);
51218229df5SAndy Whitcroft 
513a5516438SAndi Kleen 	h->nr_huge_pages--;
514a5516438SAndi Kleen 	h->nr_huge_pages_node[page_to_nid(page)]--;
515a5516438SAndi Kleen 	for (i = 0; i < pages_per_huge_page(h); i++) {
5166af2acb6SAdam Litke 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
5176af2acb6SAdam Litke 				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
5186af2acb6SAdam Litke 				1 << PG_private | 1<< PG_writeback);
5196af2acb6SAdam Litke 	}
5206af2acb6SAdam Litke 	set_compound_page_dtor(page, NULL);
5216af2acb6SAdam Litke 	set_page_refcounted(page);
5227f2e9525SGerald Schaefer 	arch_release_hugepage(page);
523a5516438SAndi Kleen 	__free_pages(page, huge_page_order(h));
5246af2acb6SAdam Litke }
5256af2acb6SAdam Litke 
526e5ff2159SAndi Kleen struct hstate *size_to_hstate(unsigned long size)
527e5ff2159SAndi Kleen {
528e5ff2159SAndi Kleen 	struct hstate *h;
529e5ff2159SAndi Kleen 
530e5ff2159SAndi Kleen 	for_each_hstate(h) {
531e5ff2159SAndi Kleen 		if (huge_page_size(h) == size)
532e5ff2159SAndi Kleen 			return h;
533e5ff2159SAndi Kleen 	}
534e5ff2159SAndi Kleen 	return NULL;
535e5ff2159SAndi Kleen }
536e5ff2159SAndi Kleen 
53727a85ef1SDavid Gibson static void free_huge_page(struct page *page)
53827a85ef1SDavid Gibson {
539a5516438SAndi Kleen 	/*
540a5516438SAndi Kleen 	 * Can't pass hstate in here because it is called from the
541a5516438SAndi Kleen 	 * compound page destructor.
542a5516438SAndi Kleen 	 */
543e5ff2159SAndi Kleen 	struct hstate *h = page_hstate(page);
5447893d1d5SAdam Litke 	int nid = page_to_nid(page);
545c79fb75eSAdam Litke 	struct address_space *mapping;
54627a85ef1SDavid Gibson 
547c79fb75eSAdam Litke 	mapping = (struct address_space *) page_private(page);
548e5df70abSAndy Whitcroft 	set_page_private(page, 0);
5497893d1d5SAdam Litke 	BUG_ON(page_count(page));
55027a85ef1SDavid Gibson 	INIT_LIST_HEAD(&page->lru);
55127a85ef1SDavid Gibson 
55227a85ef1SDavid Gibson 	spin_lock(&hugetlb_lock);
553aa888a74SAndi Kleen 	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
554a5516438SAndi Kleen 		update_and_free_page(h, page);
555a5516438SAndi Kleen 		h->surplus_huge_pages--;
556a5516438SAndi Kleen 		h->surplus_huge_pages_node[nid]--;
5577893d1d5SAdam Litke 	} else {
558a5516438SAndi Kleen 		enqueue_huge_page(h, page);
5597893d1d5SAdam Litke 	}
56027a85ef1SDavid Gibson 	spin_unlock(&hugetlb_lock);
561c79fb75eSAdam Litke 	if (mapping)
5629a119c05SAdam Litke 		hugetlb_put_quota(mapping, 1);
56327a85ef1SDavid Gibson }
56427a85ef1SDavid Gibson 
565a5516438SAndi Kleen static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
566b7ba30c6SAndi Kleen {
567b7ba30c6SAndi Kleen 	set_compound_page_dtor(page, free_huge_page);
568b7ba30c6SAndi Kleen 	spin_lock(&hugetlb_lock);
569a5516438SAndi Kleen 	h->nr_huge_pages++;
570a5516438SAndi Kleen 	h->nr_huge_pages_node[nid]++;
571b7ba30c6SAndi Kleen 	spin_unlock(&hugetlb_lock);
572b7ba30c6SAndi Kleen 	put_page(page); /* free it into the hugepage allocator */
573b7ba30c6SAndi Kleen }
574b7ba30c6SAndi Kleen 
57520a0307cSWu Fengguang static void prep_compound_gigantic_page(struct page *page, unsigned long order)
57620a0307cSWu Fengguang {
57720a0307cSWu Fengguang 	int i;
57820a0307cSWu Fengguang 	int nr_pages = 1 << order;
57920a0307cSWu Fengguang 	struct page *p = page + 1;
58020a0307cSWu Fengguang 
58120a0307cSWu Fengguang 	/* we rely on prep_new_huge_page to set the destructor */
58220a0307cSWu Fengguang 	set_compound_order(page, order);
58320a0307cSWu Fengguang 	__SetPageHead(page);
58420a0307cSWu Fengguang 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
58520a0307cSWu Fengguang 		__SetPageTail(p);
58620a0307cSWu Fengguang 		p->first_page = page;
58720a0307cSWu Fengguang 	}
58820a0307cSWu Fengguang }
58920a0307cSWu Fengguang 
59020a0307cSWu Fengguang int PageHuge(struct page *page)
59120a0307cSWu Fengguang {
59220a0307cSWu Fengguang 	compound_page_dtor *dtor;
59320a0307cSWu Fengguang 
59420a0307cSWu Fengguang 	if (!PageCompound(page))
59520a0307cSWu Fengguang 		return 0;
59620a0307cSWu Fengguang 
59720a0307cSWu Fengguang 	page = compound_head(page);
59820a0307cSWu Fengguang 	dtor = get_compound_page_dtor(page);
59920a0307cSWu Fengguang 
60020a0307cSWu Fengguang 	return dtor == free_huge_page;
60120a0307cSWu Fengguang }
60220a0307cSWu Fengguang 
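/*
 * Usage sketch (illustrative, not part of the original source): PageHuge()
 * may be passed any constituent page of a huge page, since it resolves the
 * compound head itself before comparing the destructor.  A hypothetical
 * caller that must skip hugetlb pages could simply test:
 *
 *	if (PageHuge(page))
 *		continue;
 *
 * Pages belonging to non-hugetlb compound pages (or to no compound page at
 * all) return false.
 */
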
603a5516438SAndi Kleen static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
6041da177e4SLinus Torvalds {
6051da177e4SLinus Torvalds 	struct page *page;
606f96efd58SJoe Jin 
607aa888a74SAndi Kleen 	if (h->order >= MAX_ORDER)
608aa888a74SAndi Kleen 		return NULL;
609aa888a74SAndi Kleen 
6106484eb3eSMel Gorman 	page = alloc_pages_exact_node(nid,
611551883aeSNishanth Aravamudan 		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
612551883aeSNishanth Aravamudan 						__GFP_REPEAT|__GFP_NOWARN,
613a5516438SAndi Kleen 		huge_page_order(h));
6141da177e4SLinus Torvalds 	if (page) {
6157f2e9525SGerald Schaefer 		if (arch_prepare_hugepage(page)) {
616caff3a2cSGerald Schaefer 			__free_pages(page, huge_page_order(h));
6177b8ee84dSHarvey Harrison 			return NULL;
6187f2e9525SGerald Schaefer 		}
619a5516438SAndi Kleen 		prep_new_huge_page(h, page, nid);
6201da177e4SLinus Torvalds 	}
62163b4613cSNishanth Aravamudan 
62263b4613cSNishanth Aravamudan 	return page;
62363b4613cSNishanth Aravamudan }
62463b4613cSNishanth Aravamudan 
6255ced66c9SAndi Kleen /*
6266ae11b27SLee Schermerhorn  * common helper functions for hstate_next_node_to_{alloc|free}.
6276ae11b27SLee Schermerhorn  * We may have allocated or freed a huge page based on a different
6286ae11b27SLee Schermerhorn  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
6296ae11b27SLee Schermerhorn  * be outside of *nodes_allowed.  Ensure that we use an allowed
6306ae11b27SLee Schermerhorn  * node for alloc or free.
6319a76db09SLee Schermerhorn  */
6326ae11b27SLee Schermerhorn static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
6339a76db09SLee Schermerhorn {
6346ae11b27SLee Schermerhorn 	nid = next_node(nid, *nodes_allowed);
6359a76db09SLee Schermerhorn 	if (nid == MAX_NUMNODES)
6366ae11b27SLee Schermerhorn 		nid = first_node(*nodes_allowed);
6379a76db09SLee Schermerhorn 	VM_BUG_ON(nid >= MAX_NUMNODES);
6389a76db09SLee Schermerhorn 
6399a76db09SLee Schermerhorn 	return nid;
6409a76db09SLee Schermerhorn }
6419a76db09SLee Schermerhorn 
6426ae11b27SLee Schermerhorn static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
6435ced66c9SAndi Kleen {
6446ae11b27SLee Schermerhorn 	if (!node_isset(nid, *nodes_allowed))
6456ae11b27SLee Schermerhorn 		nid = next_node_allowed(nid, nodes_allowed);
6469a76db09SLee Schermerhorn 	return nid;
6475ced66c9SAndi Kleen }
6485ced66c9SAndi Kleen 
6496ae11b27SLee Schermerhorn /*
6506ae11b27SLee Schermerhorn  * returns the previously saved node ["this node"] from which to
6516ae11b27SLee Schermerhorn  * allocate a persistent huge page for the pool and advance the
6526ae11b27SLee Schermerhorn  * next node from which to allocate, handling wrap at end of node
6536ae11b27SLee Schermerhorn  * mask.
6546ae11b27SLee Schermerhorn  */
6556ae11b27SLee Schermerhorn static int hstate_next_node_to_alloc(struct hstate *h,
6566ae11b27SLee Schermerhorn 					nodemask_t *nodes_allowed)
6576ae11b27SLee Schermerhorn {
6586ae11b27SLee Schermerhorn 	int nid;
6596ae11b27SLee Schermerhorn 
6606ae11b27SLee Schermerhorn 	VM_BUG_ON(!nodes_allowed);
6616ae11b27SLee Schermerhorn 
6626ae11b27SLee Schermerhorn 	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
6636ae11b27SLee Schermerhorn 	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
6646ae11b27SLee Schermerhorn 
6656ae11b27SLee Schermerhorn 	return nid;
6666ae11b27SLee Schermerhorn }
6676ae11b27SLee Schermerhorn 
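/*
 * Example (illustrative): with nodes_allowed = {0,2,3} and
 * h->next_nid_to_alloc == 1, hstate_next_node_to_alloc() returns 2 (the
 * next allowed node at or after 1) and advances next_nid_to_alloc to 3;
 * the following call returns 3 and wraps next_nid_to_alloc back to 0.
 * Successive calls therefore interleave allocations round-robin over the
 * allowed nodes.
 */
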
6686ae11b27SLee Schermerhorn static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
66963b4613cSNishanth Aravamudan {
67063b4613cSNishanth Aravamudan 	struct page *page;
67163b4613cSNishanth Aravamudan 	int start_nid;
67263b4613cSNishanth Aravamudan 	int next_nid;
67363b4613cSNishanth Aravamudan 	int ret = 0;
67463b4613cSNishanth Aravamudan 
6756ae11b27SLee Schermerhorn 	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
676e8c5c824SLee Schermerhorn 	next_nid = start_nid;
67763b4613cSNishanth Aravamudan 
67863b4613cSNishanth Aravamudan 	do {
679e8c5c824SLee Schermerhorn 		page = alloc_fresh_huge_page_node(h, next_nid);
6809a76db09SLee Schermerhorn 		if (page) {
68163b4613cSNishanth Aravamudan 			ret = 1;
6829a76db09SLee Schermerhorn 			break;
6839a76db09SLee Schermerhorn 		}
6846ae11b27SLee Schermerhorn 		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
6859a76db09SLee Schermerhorn 	} while (next_nid != start_nid);
68663b4613cSNishanth Aravamudan 
6873b116300SAdam Litke 	if (ret)
6883b116300SAdam Litke 		count_vm_event(HTLB_BUDDY_PGALLOC);
6893b116300SAdam Litke 	else
6903b116300SAdam Litke 		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
6913b116300SAdam Litke 
69263b4613cSNishanth Aravamudan 	return ret;
6931da177e4SLinus Torvalds }
6941da177e4SLinus Torvalds 
695e8c5c824SLee Schermerhorn /*
6966ae11b27SLee Schermerhorn  * helper for free_pool_huge_page() - return the previously saved
6976ae11b27SLee Schermerhorn  * node ["this node"] from which to free a huge page.  Advance the
6986ae11b27SLee Schermerhorn  * next node id whether or not we find a free huge page to free so
6996ae11b27SLee Schermerhorn  * that the next attempt to free addresses the next node.
700e8c5c824SLee Schermerhorn  */
7016ae11b27SLee Schermerhorn static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
702e8c5c824SLee Schermerhorn {
7036ae11b27SLee Schermerhorn 	int nid;
7049a76db09SLee Schermerhorn 
7056ae11b27SLee Schermerhorn 	VM_BUG_ON(!nodes_allowed);
7066ae11b27SLee Schermerhorn 
7076ae11b27SLee Schermerhorn 	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
7086ae11b27SLee Schermerhorn 	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
7096ae11b27SLee Schermerhorn 
7109a76db09SLee Schermerhorn 	return nid;
711e8c5c824SLee Schermerhorn }
712e8c5c824SLee Schermerhorn 
713e8c5c824SLee Schermerhorn /*
714e8c5c824SLee Schermerhorn  * Free huge page from pool from next node to free.
715e8c5c824SLee Schermerhorn  * Attempt to keep persistent huge pages more or less
716e8c5c824SLee Schermerhorn  * balanced over allowed nodes.
717e8c5c824SLee Schermerhorn  * Called with hugetlb_lock locked.
718e8c5c824SLee Schermerhorn  */
7196ae11b27SLee Schermerhorn static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
7206ae11b27SLee Schermerhorn 							 bool acct_surplus)
721e8c5c824SLee Schermerhorn {
722e8c5c824SLee Schermerhorn 	int start_nid;
723e8c5c824SLee Schermerhorn 	int next_nid;
724e8c5c824SLee Schermerhorn 	int ret = 0;
725e8c5c824SLee Schermerhorn 
7266ae11b27SLee Schermerhorn 	start_nid = hstate_next_node_to_free(h, nodes_allowed);
727e8c5c824SLee Schermerhorn 	next_nid = start_nid;
728e8c5c824SLee Schermerhorn 
729e8c5c824SLee Schermerhorn 	do {
730685f3457SLee Schermerhorn 		/*
731685f3457SLee Schermerhorn 		 * If we're returning unused surplus pages, only examine
732685f3457SLee Schermerhorn 		 * nodes with surplus pages.
733685f3457SLee Schermerhorn 		 */
734685f3457SLee Schermerhorn 		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
735685f3457SLee Schermerhorn 		    !list_empty(&h->hugepage_freelists[next_nid])) {
736e8c5c824SLee Schermerhorn 			struct page *page =
737e8c5c824SLee Schermerhorn 				list_entry(h->hugepage_freelists[next_nid].next,
738e8c5c824SLee Schermerhorn 					  struct page, lru);
739e8c5c824SLee Schermerhorn 			list_del(&page->lru);
740e8c5c824SLee Schermerhorn 			h->free_huge_pages--;
741e8c5c824SLee Schermerhorn 			h->free_huge_pages_node[next_nid]--;
742685f3457SLee Schermerhorn 			if (acct_surplus) {
743685f3457SLee Schermerhorn 				h->surplus_huge_pages--;
744685f3457SLee Schermerhorn 				h->surplus_huge_pages_node[next_nid]--;
745685f3457SLee Schermerhorn 			}
746e8c5c824SLee Schermerhorn 			update_and_free_page(h, page);
747e8c5c824SLee Schermerhorn 			ret = 1;
7489a76db09SLee Schermerhorn 			break;
749e8c5c824SLee Schermerhorn 		}
7506ae11b27SLee Schermerhorn 		next_nid = hstate_next_node_to_free(h, nodes_allowed);
7519a76db09SLee Schermerhorn 	} while (next_nid != start_nid);
752e8c5c824SLee Schermerhorn 
753e8c5c824SLee Schermerhorn 	return ret;
754e8c5c824SLee Schermerhorn }
755e8c5c824SLee Schermerhorn 
756a5516438SAndi Kleen static struct page *alloc_buddy_huge_page(struct hstate *h,
757a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long address)
7587893d1d5SAdam Litke {
7597893d1d5SAdam Litke 	struct page *page;
760d1c3fb1fSNishanth Aravamudan 	unsigned int nid;
7617893d1d5SAdam Litke 
762aa888a74SAndi Kleen 	if (h->order >= MAX_ORDER)
763aa888a74SAndi Kleen 		return NULL;
764aa888a74SAndi Kleen 
765d1c3fb1fSNishanth Aravamudan 	/*
766d1c3fb1fSNishanth Aravamudan 	 * Assume we will successfully allocate the surplus page to
767d1c3fb1fSNishanth Aravamudan 	 * prevent racing processes from causing the surplus to exceed
768d1c3fb1fSNishanth Aravamudan 	 * overcommit
769d1c3fb1fSNishanth Aravamudan 	 *
770d1c3fb1fSNishanth Aravamudan 	 * This however introduces a different race, where a process B
771d1c3fb1fSNishanth Aravamudan 	 * tries to grow the static hugepage pool while alloc_pages() is
772d1c3fb1fSNishanth Aravamudan 	 * called by process A. B will only examine the per-node
773d1c3fb1fSNishanth Aravamudan 	 * counters in determining if surplus huge pages can be
774d1c3fb1fSNishanth Aravamudan 	 * converted to normal huge pages in adjust_pool_surplus(). A
775d1c3fb1fSNishanth Aravamudan 	 * won't be able to increment the per-node counter, until the
776d1c3fb1fSNishanth Aravamudan 	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
777d1c3fb1fSNishanth Aravamudan 	 * no more huge pages can be converted from surplus to normal
778d1c3fb1fSNishanth Aravamudan 	 * state (and doesn't try to convert again). Thus, we have a
779d1c3fb1fSNishanth Aravamudan 	 * case where a surplus huge page exists, the pool is grown, and
780d1c3fb1fSNishanth Aravamudan 	 * the surplus huge page still exists after, even though it
781d1c3fb1fSNishanth Aravamudan 	 * should just have been converted to a normal huge page. This
782d1c3fb1fSNishanth Aravamudan 	 * does not leak memory, though, as the hugepage will be freed
783d1c3fb1fSNishanth Aravamudan 	 * once it is out of use. It also does not allow the counters to
784d1c3fb1fSNishanth Aravamudan 	 * go out of whack in adjust_pool_surplus() as we don't modify
785d1c3fb1fSNishanth Aravamudan 	 * the node values until we've gotten the hugepage and only the
786d1c3fb1fSNishanth Aravamudan 	 * per-node value is checked there.
787d1c3fb1fSNishanth Aravamudan 	 */
788d1c3fb1fSNishanth Aravamudan 	spin_lock(&hugetlb_lock);
789a5516438SAndi Kleen 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
790d1c3fb1fSNishanth Aravamudan 		spin_unlock(&hugetlb_lock);
791d1c3fb1fSNishanth Aravamudan 		return NULL;
792d1c3fb1fSNishanth Aravamudan 	} else {
793a5516438SAndi Kleen 		h->nr_huge_pages++;
794a5516438SAndi Kleen 		h->surplus_huge_pages++;
795d1c3fb1fSNishanth Aravamudan 	}
796d1c3fb1fSNishanth Aravamudan 	spin_unlock(&hugetlb_lock);
797d1c3fb1fSNishanth Aravamudan 
798551883aeSNishanth Aravamudan 	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
799551883aeSNishanth Aravamudan 					__GFP_REPEAT|__GFP_NOWARN,
800a5516438SAndi Kleen 					huge_page_order(h));
801d1c3fb1fSNishanth Aravamudan 
802caff3a2cSGerald Schaefer 	if (page && arch_prepare_hugepage(page)) {
803caff3a2cSGerald Schaefer 		__free_pages(page, huge_page_order(h));
804caff3a2cSGerald Schaefer 		return NULL;
805caff3a2cSGerald Schaefer 	}
806caff3a2cSGerald Schaefer 
8077893d1d5SAdam Litke 	spin_lock(&hugetlb_lock);
808d1c3fb1fSNishanth Aravamudan 	if (page) {
8092668db91SAdam Litke 		/*
8102668db91SAdam Litke 		 * This page is now managed by the hugetlb allocator and has
8112668db91SAdam Litke 		 * no users -- drop the buddy allocator's reference.
8122668db91SAdam Litke 		 */
8132668db91SAdam Litke 		put_page_testzero(page);
8142668db91SAdam Litke 		VM_BUG_ON(page_count(page));
815d1c3fb1fSNishanth Aravamudan 		nid = page_to_nid(page);
816d1c3fb1fSNishanth Aravamudan 		set_compound_page_dtor(page, free_huge_page);
817d1c3fb1fSNishanth Aravamudan 		/*
818d1c3fb1fSNishanth Aravamudan 		 * We incremented the global counters already
819d1c3fb1fSNishanth Aravamudan 		 */
820a5516438SAndi Kleen 		h->nr_huge_pages_node[nid]++;
821a5516438SAndi Kleen 		h->surplus_huge_pages_node[nid]++;
8223b116300SAdam Litke 		__count_vm_event(HTLB_BUDDY_PGALLOC);
823d1c3fb1fSNishanth Aravamudan 	} else {
824a5516438SAndi Kleen 		h->nr_huge_pages--;
825a5516438SAndi Kleen 		h->surplus_huge_pages--;
8263b116300SAdam Litke 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
8277893d1d5SAdam Litke 	}
828d1c3fb1fSNishanth Aravamudan 	spin_unlock(&hugetlb_lock);
8297893d1d5SAdam Litke 
8307893d1d5SAdam Litke 	return page;
8317893d1d5SAdam Litke }
8327893d1d5SAdam Litke 
833e4e574b7SAdam Litke /*
834e4e574b7SAdam Litke  * Increase the hugetlb pool such that it can accommodate a reservation
835e4e574b7SAdam Litke  * of size 'delta'.
836e4e574b7SAdam Litke  */
837a5516438SAndi Kleen static int gather_surplus_pages(struct hstate *h, int delta)
838e4e574b7SAdam Litke {
839e4e574b7SAdam Litke 	struct list_head surplus_list;
840e4e574b7SAdam Litke 	struct page *page, *tmp;
841e4e574b7SAdam Litke 	int ret, i;
842e4e574b7SAdam Litke 	int needed, allocated;
843e4e574b7SAdam Litke 
844a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
845ac09b3a1SAdam Litke 	if (needed <= 0) {
846a5516438SAndi Kleen 		h->resv_huge_pages += delta;
847e4e574b7SAdam Litke 		return 0;
848ac09b3a1SAdam Litke 	}
849e4e574b7SAdam Litke 
850e4e574b7SAdam Litke 	allocated = 0;
851e4e574b7SAdam Litke 	INIT_LIST_HEAD(&surplus_list);
852e4e574b7SAdam Litke 
853e4e574b7SAdam Litke 	ret = -ENOMEM;
854e4e574b7SAdam Litke retry:
855e4e574b7SAdam Litke 	spin_unlock(&hugetlb_lock);
856e4e574b7SAdam Litke 	for (i = 0; i < needed; i++) {
857a5516438SAndi Kleen 		page = alloc_buddy_huge_page(h, NULL, 0);
858e4e574b7SAdam Litke 		if (!page) {
859e4e574b7SAdam Litke 			/*
860e4e574b7SAdam Litke 			 * We were not able to allocate enough pages to
861e4e574b7SAdam Litke 			 * satisfy the entire reservation so we free what
862e4e574b7SAdam Litke 			 * we've allocated so far.
863e4e574b7SAdam Litke 			 */
864e4e574b7SAdam Litke 			spin_lock(&hugetlb_lock);
865e4e574b7SAdam Litke 			needed = 0;
866e4e574b7SAdam Litke 			goto free;
867e4e574b7SAdam Litke 		}
868e4e574b7SAdam Litke 
869e4e574b7SAdam Litke 		list_add(&page->lru, &surplus_list);
870e4e574b7SAdam Litke 	}
871e4e574b7SAdam Litke 	allocated += needed;
872e4e574b7SAdam Litke 
873e4e574b7SAdam Litke 	/*
874e4e574b7SAdam Litke 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
875e4e574b7SAdam Litke 	 * because either resv_huge_pages or free_huge_pages may have changed.
876e4e574b7SAdam Litke 	 */
877e4e574b7SAdam Litke 	spin_lock(&hugetlb_lock);
878a5516438SAndi Kleen 	needed = (h->resv_huge_pages + delta) -
879a5516438SAndi Kleen 			(h->free_huge_pages + allocated);
880e4e574b7SAdam Litke 	if (needed > 0)
881e4e574b7SAdam Litke 		goto retry;
882e4e574b7SAdam Litke 
883e4e574b7SAdam Litke 	/*
884e4e574b7SAdam Litke 	 * The surplus_list now contains _at_least_ the number of extra pages
885e4e574b7SAdam Litke 	 * needed to accommodate the reservation.  Add the appropriate number
886e4e574b7SAdam Litke 	 * of pages to the hugetlb pool and free the extras back to the buddy
887ac09b3a1SAdam Litke 	 * allocator.  Commit the entire reservation here to prevent another
888ac09b3a1SAdam Litke 	 * process from stealing the pages as they are added to the pool but
889ac09b3a1SAdam Litke 	 * before they are reserved.
890e4e574b7SAdam Litke 	 */
891e4e574b7SAdam Litke 	needed += allocated;
892a5516438SAndi Kleen 	h->resv_huge_pages += delta;
893e4e574b7SAdam Litke 	ret = 0;
894e4e574b7SAdam Litke free:
89519fc3f0aSAdam Litke 	/* Free the needed pages to the hugetlb pool */
89619fc3f0aSAdam Litke 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
89719fc3f0aSAdam Litke 		if ((--needed) < 0)
89819fc3f0aSAdam Litke 			break;
89919fc3f0aSAdam Litke 		list_del(&page->lru);
900a5516438SAndi Kleen 		enqueue_huge_page(h, page);
90119fc3f0aSAdam Litke 	}
90219fc3f0aSAdam Litke 
90319fc3f0aSAdam Litke 	/* Free unnecessary surplus pages to the buddy allocator */
90419fc3f0aSAdam Litke 	if (!list_empty(&surplus_list)) {
90519fc3f0aSAdam Litke 		spin_unlock(&hugetlb_lock);
906e4e574b7SAdam Litke 		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
907e4e574b7SAdam Litke 			list_del(&page->lru);
908af767cbdSAdam Litke 			/*
9092668db91SAdam Litke 			 * The page has a reference count of zero already, so
9102668db91SAdam Litke 			 * call free_huge_page directly instead of using
9112668db91SAdam Litke 			 * put_page.  This must be done with hugetlb_lock
912af767cbdSAdam Litke 			 * unlocked which is safe because free_huge_page takes
913af767cbdSAdam Litke 			 * hugetlb_lock before deciding how to free the page.
914af767cbdSAdam Litke 			 */
9152668db91SAdam Litke 			free_huge_page(page);
916af767cbdSAdam Litke 		}
91719fc3f0aSAdam Litke 		spin_lock(&hugetlb_lock);
918e4e574b7SAdam Litke 	}
919e4e574b7SAdam Litke 
920e4e574b7SAdam Litke 	return ret;
921e4e574b7SAdam Litke }
922e4e574b7SAdam Litke 
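/*
 * Example (illustrative): with free_huge_pages == 10, resv_huge_pages == 8
 * and a new reservation of delta == 5 pages, needed = (8 + 5) - 10 = 3, so
 * three surplus pages are requested from the buddy allocator.  If other
 * reservations or frees race with the unlocked allocation phase, 'needed'
 * is recomputed after retaking hugetlb_lock and any excess surplus pages
 * are returned to the buddy allocator.
 */
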
923e4e574b7SAdam Litke /*
924e4e574b7SAdam Litke  * When releasing a hugetlb pool reservation, any surplus pages that were
925e4e574b7SAdam Litke  * allocated to satisfy the reservation must be explicitly freed if they were
926e4e574b7SAdam Litke  * never used.
927685f3457SLee Schermerhorn  * Called with hugetlb_lock held.
928e4e574b7SAdam Litke  */
929a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h,
930a5516438SAndi Kleen 					unsigned long unused_resv_pages)
931e4e574b7SAdam Litke {
932e4e574b7SAdam Litke 	unsigned long nr_pages;
933e4e574b7SAdam Litke 
934ac09b3a1SAdam Litke 	/* Uncommit the reservation */
935a5516438SAndi Kleen 	h->resv_huge_pages -= unused_resv_pages;
936ac09b3a1SAdam Litke 
937aa888a74SAndi Kleen 	/* Cannot return gigantic pages currently */
938aa888a74SAndi Kleen 	if (h->order >= MAX_ORDER)
939aa888a74SAndi Kleen 		return;
940aa888a74SAndi Kleen 
941a5516438SAndi Kleen 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
942e4e574b7SAdam Litke 
943685f3457SLee Schermerhorn 	/*
944685f3457SLee Schermerhorn 	 * We want to release as many surplus pages as possible, spread
9459b5e5d0fSLee Schermerhorn 	 * evenly across all nodes with memory. Iterate across these nodes
9469b5e5d0fSLee Schermerhorn 	 * until we can no longer free unreserved surplus pages. This occurs
9479b5e5d0fSLee Schermerhorn 	 * when the nodes with surplus pages have no free pages.
9489b5e5d0fSLee Schermerhorn 	 * free_pool_huge_page() will balance the freed pages across the
9499b5e5d0fSLee Schermerhorn 	 * on-line nodes with memory and will handle the hstate accounting.
950685f3457SLee Schermerhorn 	 */
951685f3457SLee Schermerhorn 	while (nr_pages--) {
9529b5e5d0fSLee Schermerhorn 		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
953685f3457SLee Schermerhorn 			break;
954e4e574b7SAdam Litke 	}
955e4e574b7SAdam Litke }
956e4e574b7SAdam Litke 
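/*
 * Example (illustrative): if a reservation of 4 pages is released while
 * only 2 surplus pages exist, nr_pages = min(4, 2) = 2 and at most two
 * pages are handed back to the buddy allocator, spread across nodes by
 * free_pool_huge_page().
 */
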
957c37f9fb1SAndy Whitcroft /*
958c37f9fb1SAndy Whitcroft  * Determine if the huge page at addr within the vma has an associated
959c37f9fb1SAndy Whitcroft  * reservation.  Where it does not we will need to logically increase
960c37f9fb1SAndy Whitcroft  * reservation.  Where it does not we will need to logically increase the
961c37f9fb1SAndy Whitcroft  * reservation and actually increase quota before an allocation can occur.
962c37f9fb1SAndy Whitcroft  * Where any new reservation would be required the reservation change is
963c37f9fb1SAndy Whitcroft  * prepared, but not committed.  Once the page has been quota'd, allocated
964c37f9fb1SAndy Whitcroft  * and instantiated, the change should be committed via vma_commit_reservation.
965c37f9fb1SAndy Whitcroft  */
966e2f17d94SRoel Kluin static long vma_needs_reservation(struct hstate *h,
967a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long addr)
968c37f9fb1SAndy Whitcroft {
969c37f9fb1SAndy Whitcroft 	struct address_space *mapping = vma->vm_file->f_mapping;
970c37f9fb1SAndy Whitcroft 	struct inode *inode = mapping->host;
971c37f9fb1SAndy Whitcroft 
972f83a275dSMel Gorman 	if (vma->vm_flags & VM_MAYSHARE) {
973a5516438SAndi Kleen 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
974c37f9fb1SAndy Whitcroft 		return region_chg(&inode->i_mapping->private_list,
975c37f9fb1SAndy Whitcroft 							idx, idx + 1);
976c37f9fb1SAndy Whitcroft 
97784afd99bSAndy Whitcroft 	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
978c37f9fb1SAndy Whitcroft 		return 1;
979c37f9fb1SAndy Whitcroft 
98084afd99bSAndy Whitcroft 	} else  {
981e2f17d94SRoel Kluin 		long err;
982a5516438SAndi Kleen 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
98384afd99bSAndy Whitcroft 		struct resv_map *reservations = vma_resv_map(vma);
98484afd99bSAndy Whitcroft 
98584afd99bSAndy Whitcroft 		err = region_chg(&reservations->regions, idx, idx + 1);
98684afd99bSAndy Whitcroft 		if (err < 0)
98784afd99bSAndy Whitcroft 			return err;
988c37f9fb1SAndy Whitcroft 		return 0;
989c37f9fb1SAndy Whitcroft 	}
99084afd99bSAndy Whitcroft }
991a5516438SAndi Kleen static void vma_commit_reservation(struct hstate *h,
992a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long addr)
993c37f9fb1SAndy Whitcroft {
994c37f9fb1SAndy Whitcroft 	struct address_space *mapping = vma->vm_file->f_mapping;
995c37f9fb1SAndy Whitcroft 	struct inode *inode = mapping->host;
996c37f9fb1SAndy Whitcroft 
997f83a275dSMel Gorman 	if (vma->vm_flags & VM_MAYSHARE) {
998a5516438SAndi Kleen 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
999c37f9fb1SAndy Whitcroft 		region_add(&inode->i_mapping->private_list, idx, idx + 1);
100084afd99bSAndy Whitcroft 
100184afd99bSAndy Whitcroft 	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1002a5516438SAndi Kleen 		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
100384afd99bSAndy Whitcroft 		struct resv_map *reservations = vma_resv_map(vma);
100484afd99bSAndy Whitcroft 
100584afd99bSAndy Whitcroft 		/* Mark this page used in the map. */
100684afd99bSAndy Whitcroft 		region_add(&reservations->regions, idx, idx + 1);
1007c37f9fb1SAndy Whitcroft 	}
1008c37f9fb1SAndy Whitcroft }
1009c37f9fb1SAndy Whitcroft 
1010348ea204SAdam Litke static struct page *alloc_huge_page(struct vm_area_struct *vma,
101104f2cbe3SMel Gorman 				    unsigned long addr, int avoid_reserve)
1012348ea204SAdam Litke {
1013a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
1014348ea204SAdam Litke 	struct page *page;
10152fc39cecSAdam Litke 	struct address_space *mapping = vma->vm_file->f_mapping;
1016a1e78772SMel Gorman 	struct inode *inode = mapping->host;
1017e2f17d94SRoel Kluin 	long chg;
10182fc39cecSAdam Litke 
1019a1e78772SMel Gorman 	/*
1020a1e78772SMel Gorman 	 * Processes that did not create the mapping will have no reserves and
1021a1e78772SMel Gorman 	 * will not have been accounted against quota. Check that sufficient
1022a1e78772SMel Gorman 	 * quota is available before satisfying the allocation.
1023c37f9fb1SAndy Whitcroft 	 * MAP_NORESERVE mappings may also need pages and quota allocated
1024c37f9fb1SAndy Whitcroft 	 * if no reserve mapping overlaps.
1025a1e78772SMel Gorman 	 */
1026a5516438SAndi Kleen 	chg = vma_needs_reservation(h, vma, addr);
1027c37f9fb1SAndy Whitcroft 	if (chg < 0)
1028c37f9fb1SAndy Whitcroft 		return ERR_PTR(chg);
1029c37f9fb1SAndy Whitcroft 	if (chg)
1030a1e78772SMel Gorman 		if (hugetlb_get_quota(inode->i_mapping, chg))
1031a1e78772SMel Gorman 			return ERR_PTR(-ENOSPC);
103290d8b7e6SAdam Litke 
1033a1e78772SMel Gorman 	spin_lock(&hugetlb_lock);
1034a5516438SAndi Kleen 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1035a1e78772SMel Gorman 	spin_unlock(&hugetlb_lock);
1036a1e78772SMel Gorman 
1037a1e78772SMel Gorman 	if (!page) {
1038a5516438SAndi Kleen 		page = alloc_buddy_huge_page(h, vma, addr);
1039a1e78772SMel Gorman 		if (!page) {
1040a1e78772SMel Gorman 			hugetlb_put_quota(inode->i_mapping, chg);
1041a1e78772SMel Gorman 			return ERR_PTR(-VM_FAULT_OOM);
1042a1e78772SMel Gorman 		}
1043a1e78772SMel Gorman 	}
1044a1e78772SMel Gorman 
1045348ea204SAdam Litke 	set_page_refcounted(page);
10462fc39cecSAdam Litke 	set_page_private(page, (unsigned long) mapping);
1047a1e78772SMel Gorman 
1048a5516438SAndi Kleen 	vma_commit_reservation(h, vma, addr);
1049c37f9fb1SAndy Whitcroft 
10507893d1d5SAdam Litke 	return page;
1051b45b5bd6SDavid Gibson }
1052b45b5bd6SDavid Gibson 
105391f47662SCyrill Gorcunov int __weak alloc_bootmem_huge_page(struct hstate *h)
1054aa888a74SAndi Kleen {
1055aa888a74SAndi Kleen 	struct huge_bootmem_page *m;
10569b5e5d0fSLee Schermerhorn 	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1057aa888a74SAndi Kleen 
1058aa888a74SAndi Kleen 	while (nr_nodes) {
1059aa888a74SAndi Kleen 		void *addr;
1060aa888a74SAndi Kleen 
1061aa888a74SAndi Kleen 		addr = __alloc_bootmem_node_nopanic(
10626ae11b27SLee Schermerhorn 				NODE_DATA(hstate_next_node_to_alloc(h,
10639b5e5d0fSLee Schermerhorn 						&node_states[N_HIGH_MEMORY])),
1064aa888a74SAndi Kleen 				huge_page_size(h), huge_page_size(h), 0);
1065aa888a74SAndi Kleen 
1066aa888a74SAndi Kleen 		if (addr) {
1067aa888a74SAndi Kleen 			/*
1068aa888a74SAndi Kleen 			 * Use the beginning of the huge page to store the
1069aa888a74SAndi Kleen 			 * huge_bootmem_page struct (until gather_bootmem
1070aa888a74SAndi Kleen 			 * puts them into the mem_map).
1071aa888a74SAndi Kleen 			 */
1072aa888a74SAndi Kleen 			m = addr;
1073aa888a74SAndi Kleen 			goto found;
1074aa888a74SAndi Kleen 		}
1075aa888a74SAndi Kleen 		nr_nodes--;
1076aa888a74SAndi Kleen 	}
1077aa888a74SAndi Kleen 	return 0;
1078aa888a74SAndi Kleen 
1079aa888a74SAndi Kleen found:
1080aa888a74SAndi Kleen 	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1081aa888a74SAndi Kleen 	/* Put them into a private list first because mem_map is not up yet */
1082aa888a74SAndi Kleen 	list_add(&m->list, &huge_boot_pages);
1083aa888a74SAndi Kleen 	m->hstate = h;
1084aa888a74SAndi Kleen 	return 1;
1085aa888a74SAndi Kleen }
1086aa888a74SAndi Kleen 
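/*
 * Pages of order >= MAX_ORDER (gigantic pages) cannot come from the buddy
 * allocator, so their compound-page metadata is set up with the gigantic
 * variant below; normal-sized huge pages use the regular
 * prep_compound_page().
 */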
108718229df5SAndy Whitcroft static void prep_compound_huge_page(struct page *page, int order)
108818229df5SAndy Whitcroft {
108918229df5SAndy Whitcroft 	if (unlikely(order > (MAX_ORDER - 1)))
109018229df5SAndy Whitcroft 		prep_compound_gigantic_page(page, order);
109118229df5SAndy Whitcroft 	else
109218229df5SAndy Whitcroft 		prep_compound_page(page, order);
109318229df5SAndy Whitcroft }
109418229df5SAndy Whitcroft 
1095aa888a74SAndi Kleen /* Put bootmem huge pages into the standard lists after mem_map is up */
1096aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void)
1097aa888a74SAndi Kleen {
1098aa888a74SAndi Kleen 	struct huge_bootmem_page *m;
1099aa888a74SAndi Kleen 
1100aa888a74SAndi Kleen 	list_for_each_entry(m, &huge_boot_pages, list) {
1101aa888a74SAndi Kleen 		struct page *page = virt_to_page(m);
1102aa888a74SAndi Kleen 		struct hstate *h = m->hstate;
1103aa888a74SAndi Kleen 		__ClearPageReserved(page);
1104aa888a74SAndi Kleen 		WARN_ON(page_count(page) != 1);
110518229df5SAndy Whitcroft 		prep_compound_huge_page(page, h->order);
1106aa888a74SAndi Kleen 		prep_new_huge_page(h, page, page_to_nid(page));
1107aa888a74SAndi Kleen 	}
1108aa888a74SAndi Kleen }
1109aa888a74SAndi Kleen 
11108faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
11111da177e4SLinus Torvalds {
11121da177e4SLinus Torvalds 	unsigned long i;
11131da177e4SLinus Torvalds 
1114e5ff2159SAndi Kleen 	for (i = 0; i < h->max_huge_pages; ++i) {
1115aa888a74SAndi Kleen 		if (h->order >= MAX_ORDER) {
1116aa888a74SAndi Kleen 			if (!alloc_bootmem_huge_page(h))
1117aa888a74SAndi Kleen 				break;
11189b5e5d0fSLee Schermerhorn 		} else if (!alloc_fresh_huge_page(h,
11199b5e5d0fSLee Schermerhorn 					 &node_states[N_HIGH_MEMORY]))
11201da177e4SLinus Torvalds 			break;
11211da177e4SLinus Torvalds 	}
11228faa8b07SAndi Kleen 	h->max_huge_pages = i;
1123e5ff2159SAndi Kleen }
1124e5ff2159SAndi Kleen 
1125e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void)
1126e5ff2159SAndi Kleen {
1127e5ff2159SAndi Kleen 	struct hstate *h;
1128e5ff2159SAndi Kleen 
1129e5ff2159SAndi Kleen 	for_each_hstate(h) {
11308faa8b07SAndi Kleen 		/* oversized (gigantic) huge pages were initialized in early boot */
11318faa8b07SAndi Kleen 		if (h->order < MAX_ORDER)
11328faa8b07SAndi Kleen 			hugetlb_hstate_alloc_pages(h);
1133e5ff2159SAndi Kleen 	}
1134e5ff2159SAndi Kleen }
1135e5ff2159SAndi Kleen 
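/*
 * memfmt() pretty-prints a size in bytes for the boot-time report below.
 * For example, 16384 becomes "16 KB", 2097152 becomes "2 MB" and
 * 1073741824 becomes "1 GB".
 */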
11364abd32dbSAndi Kleen static char * __init memfmt(char *buf, unsigned long n)
11374abd32dbSAndi Kleen {
11384abd32dbSAndi Kleen 	if (n >= (1UL << 30))
11394abd32dbSAndi Kleen 		sprintf(buf, "%lu GB", n >> 30);
11404abd32dbSAndi Kleen 	else if (n >= (1UL << 20))
11414abd32dbSAndi Kleen 		sprintf(buf, "%lu MB", n >> 20);
11424abd32dbSAndi Kleen 	else
11434abd32dbSAndi Kleen 		sprintf(buf, "%lu KB", n >> 10);
11444abd32dbSAndi Kleen 	return buf;
11454abd32dbSAndi Kleen }
11464abd32dbSAndi Kleen 
1147e5ff2159SAndi Kleen static void __init report_hugepages(void)
1148e5ff2159SAndi Kleen {
1149e5ff2159SAndi Kleen 	struct hstate *h;
1150e5ff2159SAndi Kleen 
1151e5ff2159SAndi Kleen 	for_each_hstate(h) {
11524abd32dbSAndi Kleen 		char buf[32];
11534abd32dbSAndi Kleen 		printk(KERN_INFO "HugeTLB registered %s page size, "
11544abd32dbSAndi Kleen 				 "pre-allocated %ld pages\n",
11554abd32dbSAndi Kleen 			memfmt(buf, huge_page_size(h)),
11564abd32dbSAndi Kleen 			h->free_huge_pages);
1157e5ff2159SAndi Kleen 	}
1158e5ff2159SAndi Kleen }
1159e5ff2159SAndi Kleen 
11601da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM
11616ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count,
11626ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
11631da177e4SLinus Torvalds {
11644415cc8dSChristoph Lameter 	int i;
11654415cc8dSChristoph Lameter 
1166aa888a74SAndi Kleen 	if (h->order >= MAX_ORDER)
1167aa888a74SAndi Kleen 		return;
1168aa888a74SAndi Kleen 
11696ae11b27SLee Schermerhorn 	for_each_node_mask(i, *nodes_allowed) {
11701da177e4SLinus Torvalds 		struct page *page, *next;
1171a5516438SAndi Kleen 		struct list_head *freel = &h->hugepage_freelists[i];
1172a5516438SAndi Kleen 		list_for_each_entry_safe(page, next, freel, lru) {
1173a5516438SAndi Kleen 			if (count >= h->nr_huge_pages)
11746b0c880dSAdam Litke 				return;
11751da177e4SLinus Torvalds 			if (PageHighMem(page))
11761da177e4SLinus Torvalds 				continue;
11771da177e4SLinus Torvalds 			list_del(&page->lru);
1178e5ff2159SAndi Kleen 			update_and_free_page(h, page);
1179a5516438SAndi Kleen 			h->free_huge_pages--;
1180a5516438SAndi Kleen 			h->free_huge_pages_node[page_to_nid(page)]--;
11811da177e4SLinus Torvalds 		}
11821da177e4SLinus Torvalds 	}
11831da177e4SLinus Torvalds }
11841da177e4SLinus Torvalds #else
11856ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count,
11866ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
11871da177e4SLinus Torvalds {
11881da177e4SLinus Torvalds }
11891da177e4SLinus Torvalds #endif
11901da177e4SLinus Torvalds 
119120a0307cSWu Fengguang /*
119220a0307cSWu Fengguang  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
119320a0307cSWu Fengguang  * balanced by operating on them in a round-robin fashion.
119420a0307cSWu Fengguang  * Returns 1 if an adjustment was made.
119520a0307cSWu Fengguang  */
11966ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
11976ae11b27SLee Schermerhorn 				int delta)
119820a0307cSWu Fengguang {
1199e8c5c824SLee Schermerhorn 	int start_nid, next_nid;
120020a0307cSWu Fengguang 	int ret = 0;
120120a0307cSWu Fengguang 
120220a0307cSWu Fengguang 	VM_BUG_ON(delta != -1 && delta != 1);
120320a0307cSWu Fengguang 
1204e8c5c824SLee Schermerhorn 	if (delta < 0)
12056ae11b27SLee Schermerhorn 		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1206e8c5c824SLee Schermerhorn 	else
12076ae11b27SLee Schermerhorn 		start_nid = hstate_next_node_to_free(h, nodes_allowed);
1208e8c5c824SLee Schermerhorn 	next_nid = start_nid;
1209e8c5c824SLee Schermerhorn 
1210e8c5c824SLee Schermerhorn 	do {
1211e8c5c824SLee Schermerhorn 		int nid = next_nid;
1212e8c5c824SLee Schermerhorn 		if (delta < 0)  {
1213e8c5c824SLee Schermerhorn 			/*
1214e8c5c824SLee Schermerhorn 			 * To shrink on this node, there must be a surplus page
1215e8c5c824SLee Schermerhorn 			 */
12169a76db09SLee Schermerhorn 			if (!h->surplus_huge_pages_node[nid]) {
12176ae11b27SLee Schermerhorn 				next_nid = hstate_next_node_to_alloc(h,
12186ae11b27SLee Schermerhorn 								nodes_allowed);
121920a0307cSWu Fengguang 				continue;
1220e8c5c824SLee Schermerhorn 			}
12219a76db09SLee Schermerhorn 		}
1222e8c5c824SLee Schermerhorn 		if (delta > 0) {
1223e8c5c824SLee Schermerhorn 			/*
1224e8c5c824SLee Schermerhorn 			 * Surplus cannot exceed the total number of pages
1225e8c5c824SLee Schermerhorn 			 */
1226e8c5c824SLee Schermerhorn 			if (h->surplus_huge_pages_node[nid] >=
12279a76db09SLee Schermerhorn 						h->nr_huge_pages_node[nid]) {
12286ae11b27SLee Schermerhorn 				next_nid = hstate_next_node_to_free(h,
12296ae11b27SLee Schermerhorn 								nodes_allowed);
123020a0307cSWu Fengguang 				continue;
1231e8c5c824SLee Schermerhorn 			}
12329a76db09SLee Schermerhorn 		}
123320a0307cSWu Fengguang 
123420a0307cSWu Fengguang 		h->surplus_huge_pages += delta;
123520a0307cSWu Fengguang 		h->surplus_huge_pages_node[nid] += delta;
123620a0307cSWu Fengguang 		ret = 1;
123720a0307cSWu Fengguang 		break;
1238e8c5c824SLee Schermerhorn 	} while (next_nid != start_nid);
123920a0307cSWu Fengguang 
124020a0307cSWu Fengguang 	return ret;
124120a0307cSWu Fengguang }
124220a0307cSWu Fengguang 
1243a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
12446ae11b27SLee Schermerhorn static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
12456ae11b27SLee Schermerhorn 						nodemask_t *nodes_allowed)
12461da177e4SLinus Torvalds {
12477893d1d5SAdam Litke 	unsigned long min_count, ret;
12481da177e4SLinus Torvalds 
1249aa888a74SAndi Kleen 	if (h->order >= MAX_ORDER)
1250aa888a74SAndi Kleen 		return h->max_huge_pages;
1251aa888a74SAndi Kleen 
12527893d1d5SAdam Litke 	/*
12537893d1d5SAdam Litke 	 * Increase the pool size
12547893d1d5SAdam Litke 	 * First take pages out of surplus state.  Then make up the
12557893d1d5SAdam Litke 	 * remaining difference by allocating fresh huge pages.
1256d1c3fb1fSNishanth Aravamudan 	 *
1257d1c3fb1fSNishanth Aravamudan 	 * We might race with alloc_buddy_huge_page() here and be unable
1258d1c3fb1fSNishanth Aravamudan 	 * to convert a surplus huge page to a normal huge page. That is
1259d1c3fb1fSNishanth Aravamudan 	 * not critical, though, it just means the overall size of the
1260d1c3fb1fSNishanth Aravamudan 	 * pool might be one hugepage larger than it needs to be, but
1261d1c3fb1fSNishanth Aravamudan 	 * within all the constraints specified by the sysctls.
12627893d1d5SAdam Litke 	 */
12631da177e4SLinus Torvalds 	spin_lock(&hugetlb_lock);
1264a5516438SAndi Kleen 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
12656ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
12667893d1d5SAdam Litke 			break;
12677893d1d5SAdam Litke 	}
12687893d1d5SAdam Litke 
1269a5516438SAndi Kleen 	while (count > persistent_huge_pages(h)) {
12707893d1d5SAdam Litke 		/*
12717893d1d5SAdam Litke 		 * If this allocation races such that we no longer need the
12727893d1d5SAdam Litke 		 * page, free_huge_page will handle it by freeing the page
12737893d1d5SAdam Litke 		 * and reducing the surplus.
12747893d1d5SAdam Litke 		 */
12757893d1d5SAdam Litke 		spin_unlock(&hugetlb_lock);
12766ae11b27SLee Schermerhorn 		ret = alloc_fresh_huge_page(h, nodes_allowed);
12777893d1d5SAdam Litke 		spin_lock(&hugetlb_lock);
12787893d1d5SAdam Litke 		if (!ret)
12797893d1d5SAdam Litke 			goto out;
12807893d1d5SAdam Litke 
12817893d1d5SAdam Litke 	}
12827893d1d5SAdam Litke 
12837893d1d5SAdam Litke 	/*
12847893d1d5SAdam Litke 	 * Decrease the pool size
12857893d1d5SAdam Litke 	 * First return free pages to the buddy allocator (being careful
12867893d1d5SAdam Litke 	 * to keep enough around to satisfy reservations).  Then place
12877893d1d5SAdam Litke 	 * pages into surplus state as needed so the pool will shrink
12887893d1d5SAdam Litke 	 * to the desired size as pages become free.
1289d1c3fb1fSNishanth Aravamudan 	 *
1290d1c3fb1fSNishanth Aravamudan 	 * By placing pages into the surplus state independent of the
1291d1c3fb1fSNishanth Aravamudan 	 * overcommit value, we are allowing the surplus pool size to
1292d1c3fb1fSNishanth Aravamudan 	 * exceed overcommit. There are few sane options here. Since
1293d1c3fb1fSNishanth Aravamudan 	 * alloc_buddy_huge_page() is checking the global counter,
1294d1c3fb1fSNishanth Aravamudan 	 * though, we'll note that we're not allowed to exceed surplus
1295d1c3fb1fSNishanth Aravamudan 	 * and won't grow the pool anywhere else, at least not until one of
1296d1c3fb1fSNishanth Aravamudan 	 * the sysctls is changed or the surplus pages go out of use.
12977893d1d5SAdam Litke 	 */
1298a5516438SAndi Kleen 	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
12996b0c880dSAdam Litke 	min_count = max(count, min_count);
13006ae11b27SLee Schermerhorn 	try_to_free_low(h, min_count, nodes_allowed);
1301a5516438SAndi Kleen 	while (min_count < persistent_huge_pages(h)) {
13026ae11b27SLee Schermerhorn 		if (!free_pool_huge_page(h, nodes_allowed, 0))
13031da177e4SLinus Torvalds 			break;
13041da177e4SLinus Torvalds 	}
1305a5516438SAndi Kleen 	while (count < persistent_huge_pages(h)) {
13066ae11b27SLee Schermerhorn 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
13077893d1d5SAdam Litke 			break;
13087893d1d5SAdam Litke 	}
13097893d1d5SAdam Litke out:
1310a5516438SAndi Kleen 	ret = persistent_huge_pages(h);
13111da177e4SLinus Torvalds 	spin_unlock(&hugetlb_lock);
13127893d1d5SAdam Litke 	return ret;
13131da177e4SLinus Torvalds }
13141da177e4SLinus Torvalds 
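/*
 * The sysfs interface defined below exposes each hstate under
 * /sys/kernel/mm/hugepages/<hstate name>/ (for example hugepages-2048kB
 * on x86 with 2 MB huge pages).  Writing to the nr_hugepages file there,
 * e.g. (size and count illustrative)
 *
 *	echo 128 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * ends up in set_max_huge_pages() above, as does the older
 * /proc/sys/vm/nr_hugepages sysctl handled later in this file.
 */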
1315a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \
1316a3437870SNishanth Aravamudan 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1317a3437870SNishanth Aravamudan 
1318a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \
1319a3437870SNishanth Aravamudan 	static struct kobj_attribute _name##_attr = \
1320a3437870SNishanth Aravamudan 		__ATTR(_name, 0644, _name##_show, _name##_store)
1321a3437870SNishanth Aravamudan 
1322a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj;
1323a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1324a3437870SNishanth Aravamudan 
13259a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
13269a305230SLee Schermerhorn 
13279a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1328a3437870SNishanth Aravamudan {
1329a3437870SNishanth Aravamudan 	int i;
13309a305230SLee Schermerhorn 
1331a3437870SNishanth Aravamudan 	for (i = 0; i < HUGE_MAX_HSTATE; i++)
13329a305230SLee Schermerhorn 		if (hstate_kobjs[i] == kobj) {
13339a305230SLee Schermerhorn 			if (nidp)
13349a305230SLee Schermerhorn 				*nidp = NUMA_NO_NODE;
1335a3437870SNishanth Aravamudan 			return &hstates[i];
13369a305230SLee Schermerhorn 		}
13379a305230SLee Schermerhorn 
13389a305230SLee Schermerhorn 	return kobj_to_node_hstate(kobj, nidp);
1339a3437870SNishanth Aravamudan }
1340a3437870SNishanth Aravamudan 
134106808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1342a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
1343a3437870SNishanth Aravamudan {
13449a305230SLee Schermerhorn 	struct hstate *h;
13459a305230SLee Schermerhorn 	unsigned long nr_huge_pages;
13469a305230SLee Schermerhorn 	int nid;
13479a305230SLee Schermerhorn 
13489a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
13499a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
13509a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages;
13519a305230SLee Schermerhorn 	else
13529a305230SLee Schermerhorn 		nr_huge_pages = h->nr_huge_pages_node[nid];
13539a305230SLee Schermerhorn 
13549a305230SLee Schermerhorn 	return sprintf(buf, "%lu\n", nr_huge_pages);
1355a3437870SNishanth Aravamudan }
135606808b08SLee Schermerhorn static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
135706808b08SLee Schermerhorn 			struct kobject *kobj, struct kobj_attribute *attr,
135806808b08SLee Schermerhorn 			const char *buf, size_t len)
1359a3437870SNishanth Aravamudan {
1360a3437870SNishanth Aravamudan 	int err;
13619a305230SLee Schermerhorn 	int nid;
136206808b08SLee Schermerhorn 	unsigned long count;
13639a305230SLee Schermerhorn 	struct hstate *h;
1364bad44b5bSDavid Rientjes 	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1365a3437870SNishanth Aravamudan 
136606808b08SLee Schermerhorn 	err = strict_strtoul(buf, 10, &count);
1367a3437870SNishanth Aravamudan 	if (err)
1368a3437870SNishanth Aravamudan 		return 0;
1369a3437870SNishanth Aravamudan 
13709a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
13719a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE) {
13729a305230SLee Schermerhorn 		/*
13739a305230SLee Schermerhorn 		 * global hstate attribute
13749a305230SLee Schermerhorn 		 */
13759a305230SLee Schermerhorn 		if (!(obey_mempolicy &&
13769a305230SLee Schermerhorn 				init_nodemask_of_mempolicy(nodes_allowed))) {
137706808b08SLee Schermerhorn 			NODEMASK_FREE(nodes_allowed);
13789a305230SLee Schermerhorn 			nodes_allowed = &node_states[N_HIGH_MEMORY];
137906808b08SLee Schermerhorn 		}
13809a305230SLee Schermerhorn 	} else if (nodes_allowed) {
13819a305230SLee Schermerhorn 		/*
13829a305230SLee Schermerhorn 		 * per node hstate attribute: adjust count to global,
13839a305230SLee Schermerhorn 		 * but restrict alloc/free to the specified node.
13849a305230SLee Schermerhorn 		 */
13859a305230SLee Schermerhorn 		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
13869a305230SLee Schermerhorn 		init_nodemask_of_node(nodes_allowed, nid);
13879a305230SLee Schermerhorn 	} else
13889a305230SLee Schermerhorn 		nodes_allowed = &node_states[N_HIGH_MEMORY];
13899a305230SLee Schermerhorn 
139006808b08SLee Schermerhorn 	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1391a3437870SNishanth Aravamudan 
13929b5e5d0fSLee Schermerhorn 	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
139306808b08SLee Schermerhorn 		NODEMASK_FREE(nodes_allowed);
139406808b08SLee Schermerhorn 
139506808b08SLee Schermerhorn 	return len;
139606808b08SLee Schermerhorn }
139706808b08SLee Schermerhorn 
139806808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj,
139906808b08SLee Schermerhorn 				       struct kobj_attribute *attr, char *buf)
140006808b08SLee Schermerhorn {
140106808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
140206808b08SLee Schermerhorn }
140306808b08SLee Schermerhorn 
140406808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj,
140506808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
140606808b08SLee Schermerhorn {
140706808b08SLee Schermerhorn 	return nr_hugepages_store_common(false, kobj, attr, buf, len);
1408a3437870SNishanth Aravamudan }
1409a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages);
1410a3437870SNishanth Aravamudan 
141106808b08SLee Schermerhorn #ifdef CONFIG_NUMA
141206808b08SLee Schermerhorn 
141306808b08SLee Schermerhorn /*
141406808b08SLee Schermerhorn  * hstate attribute for an optional mempolicy-based constraint on persistent
141506808b08SLee Schermerhorn  * huge page alloc/free.
141606808b08SLee Schermerhorn  */
141706808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
141806808b08SLee Schermerhorn 				       struct kobj_attribute *attr, char *buf)
141906808b08SLee Schermerhorn {
142006808b08SLee Schermerhorn 	return nr_hugepages_show_common(kobj, attr, buf);
142106808b08SLee Schermerhorn }
142206808b08SLee Schermerhorn 
142306808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
142406808b08SLee Schermerhorn 	       struct kobj_attribute *attr, const char *buf, size_t len)
142506808b08SLee Schermerhorn {
142606808b08SLee Schermerhorn 	return nr_hugepages_store_common(true, kobj, attr, buf, len);
142706808b08SLee Schermerhorn }
142806808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy);
142906808b08SLee Schermerhorn #endif
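/*
 * Illustrative use of the attribute above (paths, sizes and node numbers
 * are examples only): running the write under a mempolicy constrains the
 * allocation to that policy's nodes, e.g.
 *
 *	numactl --interleave=0,1 sh -c \
 *		'echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 *
 * whereas writing to the plain nr_hugepages file ignores the mempolicy.
 */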
143006808b08SLee Schermerhorn 
143106808b08SLee Schermerhorn 
1432a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1433a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
1434a3437870SNishanth Aravamudan {
14359a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1436a3437870SNishanth Aravamudan 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1437a3437870SNishanth Aravamudan }
1438a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1439a3437870SNishanth Aravamudan 		struct kobj_attribute *attr, const char *buf, size_t count)
1440a3437870SNishanth Aravamudan {
1441a3437870SNishanth Aravamudan 	int err;
1442a3437870SNishanth Aravamudan 	unsigned long input;
14439a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1444a3437870SNishanth Aravamudan 
1445a3437870SNishanth Aravamudan 	err = strict_strtoul(buf, 10, &input);
1446a3437870SNishanth Aravamudan 	if (err)
1447a3437870SNishanth Aravamudan 		return 0;
1448a3437870SNishanth Aravamudan 
1449a3437870SNishanth Aravamudan 	spin_lock(&hugetlb_lock);
1450a3437870SNishanth Aravamudan 	h->nr_overcommit_huge_pages = input;
1451a3437870SNishanth Aravamudan 	spin_unlock(&hugetlb_lock);
1452a3437870SNishanth Aravamudan 
1453a3437870SNishanth Aravamudan 	return count;
1454a3437870SNishanth Aravamudan }
1455a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages);
1456a3437870SNishanth Aravamudan 
1457a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj,
1458a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
1459a3437870SNishanth Aravamudan {
14609a305230SLee Schermerhorn 	struct hstate *h;
14619a305230SLee Schermerhorn 	unsigned long free_huge_pages;
14629a305230SLee Schermerhorn 	int nid;
14639a305230SLee Schermerhorn 
14649a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
14659a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
14669a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages;
14679a305230SLee Schermerhorn 	else
14689a305230SLee Schermerhorn 		free_huge_pages = h->free_huge_pages_node[nid];
14699a305230SLee Schermerhorn 
14709a305230SLee Schermerhorn 	return sprintf(buf, "%lu\n", free_huge_pages);
1471a3437870SNishanth Aravamudan }
1472a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages);
1473a3437870SNishanth Aravamudan 
1474a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj,
1475a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
1476a3437870SNishanth Aravamudan {
14779a305230SLee Schermerhorn 	struct hstate *h = kobj_to_hstate(kobj, NULL);
1478a3437870SNishanth Aravamudan 	return sprintf(buf, "%lu\n", h->resv_huge_pages);
1479a3437870SNishanth Aravamudan }
1480a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages);
1481a3437870SNishanth Aravamudan 
1482a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj,
1483a3437870SNishanth Aravamudan 					struct kobj_attribute *attr, char *buf)
1484a3437870SNishanth Aravamudan {
14859a305230SLee Schermerhorn 	struct hstate *h;
14869a305230SLee Schermerhorn 	unsigned long surplus_huge_pages;
14879a305230SLee Schermerhorn 	int nid;
14889a305230SLee Schermerhorn 
14899a305230SLee Schermerhorn 	h = kobj_to_hstate(kobj, &nid);
14909a305230SLee Schermerhorn 	if (nid == NUMA_NO_NODE)
14919a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages;
14929a305230SLee Schermerhorn 	else
14939a305230SLee Schermerhorn 		surplus_huge_pages = h->surplus_huge_pages_node[nid];
14949a305230SLee Schermerhorn 
14959a305230SLee Schermerhorn 	return sprintf(buf, "%lu\n", surplus_huge_pages);
1496a3437870SNishanth Aravamudan }
1497a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages);
1498a3437870SNishanth Aravamudan 
1499a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = {
1500a3437870SNishanth Aravamudan 	&nr_hugepages_attr.attr,
1501a3437870SNishanth Aravamudan 	&nr_overcommit_hugepages_attr.attr,
1502a3437870SNishanth Aravamudan 	&free_hugepages_attr.attr,
1503a3437870SNishanth Aravamudan 	&resv_hugepages_attr.attr,
1504a3437870SNishanth Aravamudan 	&surplus_hugepages_attr.attr,
150506808b08SLee Schermerhorn #ifdef CONFIG_NUMA
150606808b08SLee Schermerhorn 	&nr_hugepages_mempolicy_attr.attr,
150706808b08SLee Schermerhorn #endif
1508a3437870SNishanth Aravamudan 	NULL,
1509a3437870SNishanth Aravamudan };
1510a3437870SNishanth Aravamudan 
1511a3437870SNishanth Aravamudan static struct attribute_group hstate_attr_group = {
1512a3437870SNishanth Aravamudan 	.attrs = hstate_attrs,
1513a3437870SNishanth Aravamudan };
1514a3437870SNishanth Aravamudan 
15159a305230SLee Schermerhorn static int __init hugetlb_sysfs_add_hstate(struct hstate *h,
15169a305230SLee Schermerhorn 				struct kobject *parent,
15179a305230SLee Schermerhorn 				struct kobject **hstate_kobjs,
15189a305230SLee Schermerhorn 				struct attribute_group *hstate_attr_group)
1519a3437870SNishanth Aravamudan {
1520a3437870SNishanth Aravamudan 	int retval;
15219a305230SLee Schermerhorn 	int hi = h - hstates;
1522a3437870SNishanth Aravamudan 
15239a305230SLee Schermerhorn 	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
15249a305230SLee Schermerhorn 	if (!hstate_kobjs[hi])
1525a3437870SNishanth Aravamudan 		return -ENOMEM;
1526a3437870SNishanth Aravamudan 
15279a305230SLee Schermerhorn 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1528a3437870SNishanth Aravamudan 	if (retval)
15299a305230SLee Schermerhorn 		kobject_put(hstate_kobjs[hi]);
1530a3437870SNishanth Aravamudan 
1531a3437870SNishanth Aravamudan 	return retval;
1532a3437870SNishanth Aravamudan }
1533a3437870SNishanth Aravamudan 
1534a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void)
1535a3437870SNishanth Aravamudan {
1536a3437870SNishanth Aravamudan 	struct hstate *h;
1537a3437870SNishanth Aravamudan 	int err;
1538a3437870SNishanth Aravamudan 
1539a3437870SNishanth Aravamudan 	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1540a3437870SNishanth Aravamudan 	if (!hugepages_kobj)
1541a3437870SNishanth Aravamudan 		return;
1542a3437870SNishanth Aravamudan 
1543a3437870SNishanth Aravamudan 	for_each_hstate(h) {
15449a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
15459a305230SLee Schermerhorn 					 hstate_kobjs, &hstate_attr_group);
1546a3437870SNishanth Aravamudan 		if (err)
1547a3437870SNishanth Aravamudan 			printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
1548a3437870SNishanth Aravamudan 								h->name);
1549a3437870SNishanth Aravamudan 	}
1550a3437870SNishanth Aravamudan }
1551a3437870SNishanth Aravamudan 
15529a305230SLee Schermerhorn #ifdef CONFIG_NUMA
15539a305230SLee Schermerhorn 
15549a305230SLee Schermerhorn /*
15559a305230SLee Schermerhorn  * node_hstate/s - associate per node hstate attributes, via their kobjects,
15569a305230SLee Schermerhorn  * with node sysdevs in node_devices[] using a parallel array.  The array
15579a305230SLee Schermerhorn  * index of a node sysdev or _hstate == node id.
15589a305230SLee Schermerhorn  * This is here to avoid any static dependency of the node sysdev driver, in
15599a305230SLee Schermerhorn  * the base kernel, on the hugetlb module.
15609a305230SLee Schermerhorn  */
15619a305230SLee Schermerhorn struct node_hstate {
15629a305230SLee Schermerhorn 	struct kobject		*hugepages_kobj;
15639a305230SLee Schermerhorn 	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
15649a305230SLee Schermerhorn };
15659a305230SLee Schermerhorn struct node_hstate node_hstates[MAX_NUMNODES];
15669a305230SLee Schermerhorn 
15679a305230SLee Schermerhorn /*
15689a305230SLee Schermerhorn  * A subset of global hstate attributes for node sysdevs
15699a305230SLee Schermerhorn  */
15709a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = {
15719a305230SLee Schermerhorn 	&nr_hugepages_attr.attr,
15729a305230SLee Schermerhorn 	&free_hugepages_attr.attr,
15739a305230SLee Schermerhorn 	&surplus_hugepages_attr.attr,
15749a305230SLee Schermerhorn 	NULL,
15759a305230SLee Schermerhorn };
15769a305230SLee Schermerhorn 
15779a305230SLee Schermerhorn static struct attribute_group per_node_hstate_attr_group = {
15789a305230SLee Schermerhorn 	.attrs = per_node_hstate_attrs,
15799a305230SLee Schermerhorn };
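/*
 * These per-node attributes typically show up under
 * /sys/devices/system/node/node<N>/hugepages/hugepages-<size>kB/ once
 * hugetlb_register_node() has run for node <N>.  Only nr_hugepages,
 * free_hugepages and surplus_hugepages are provided there; the overcommit
 * and reserve counts remain global-only attributes.
 */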
15809a305230SLee Schermerhorn 
15819a305230SLee Schermerhorn /*
15829a305230SLee Schermerhorn  * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
15839a305230SLee Schermerhorn  * Returns node id via non-NULL nidp.
15849a305230SLee Schermerhorn  */
15859a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
15869a305230SLee Schermerhorn {
15879a305230SLee Schermerhorn 	int nid;
15889a305230SLee Schermerhorn 
15899a305230SLee Schermerhorn 	for (nid = 0; nid < nr_node_ids; nid++) {
15909a305230SLee Schermerhorn 		struct node_hstate *nhs = &node_hstates[nid];
15919a305230SLee Schermerhorn 		int i;
15929a305230SLee Schermerhorn 		for (i = 0; i < HUGE_MAX_HSTATE; i++)
15939a305230SLee Schermerhorn 			if (nhs->hstate_kobjs[i] == kobj) {
15949a305230SLee Schermerhorn 				if (nidp)
15959a305230SLee Schermerhorn 					*nidp = nid;
15969a305230SLee Schermerhorn 				return &hstates[i];
15979a305230SLee Schermerhorn 			}
15989a305230SLee Schermerhorn 	}
15999a305230SLee Schermerhorn 
16009a305230SLee Schermerhorn 	BUG();
16019a305230SLee Schermerhorn 	return NULL;
16029a305230SLee Schermerhorn }
16039a305230SLee Schermerhorn 
16049a305230SLee Schermerhorn /*
16059a305230SLee Schermerhorn  * Unregister hstate attributes from a single node sysdev.
16069a305230SLee Schermerhorn  * No-op if no hstate attributes attached.
16079a305230SLee Schermerhorn  */
16089a305230SLee Schermerhorn void hugetlb_unregister_node(struct node *node)
16099a305230SLee Schermerhorn {
16109a305230SLee Schermerhorn 	struct hstate *h;
16119a305230SLee Schermerhorn 	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
16129a305230SLee Schermerhorn 
16139a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
16149b5e5d0fSLee Schermerhorn 		return;		/* no hstate attributes */
16159a305230SLee Schermerhorn 
16169a305230SLee Schermerhorn 	for_each_hstate(h)
16179a305230SLee Schermerhorn 		if (nhs->hstate_kobjs[h - hstates]) {
16189a305230SLee Schermerhorn 			kobject_put(nhs->hstate_kobjs[h - hstates]);
16199a305230SLee Schermerhorn 			nhs->hstate_kobjs[h - hstates] = NULL;
16209a305230SLee Schermerhorn 		}
16219a305230SLee Schermerhorn 
16229a305230SLee Schermerhorn 	kobject_put(nhs->hugepages_kobj);
16239a305230SLee Schermerhorn 	nhs->hugepages_kobj = NULL;
16249a305230SLee Schermerhorn }
16259a305230SLee Schermerhorn 
16269a305230SLee Schermerhorn /*
16279a305230SLee Schermerhorn  * hugetlb module exit:  unregister hstate attributes from node sysdevs
16289a305230SLee Schermerhorn  * that have them.
16299a305230SLee Schermerhorn  */
16309a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void)
16319a305230SLee Schermerhorn {
16329a305230SLee Schermerhorn 	int nid;
16339a305230SLee Schermerhorn 
16349a305230SLee Schermerhorn 	/*
16359a305230SLee Schermerhorn 	 * disable node sysdev registrations.
16369a305230SLee Schermerhorn 	 */
16379a305230SLee Schermerhorn 	register_hugetlbfs_with_node(NULL, NULL);
16389a305230SLee Schermerhorn 
16399a305230SLee Schermerhorn 	/*
16409a305230SLee Schermerhorn 	 * remove hstate attributes from any nodes that have them.
16419a305230SLee Schermerhorn 	 */
16429a305230SLee Schermerhorn 	for (nid = 0; nid < nr_node_ids; nid++)
16439a305230SLee Schermerhorn 		hugetlb_unregister_node(&node_devices[nid]);
16449a305230SLee Schermerhorn }
16459a305230SLee Schermerhorn 
16469a305230SLee Schermerhorn /*
16479a305230SLee Schermerhorn  * Register hstate attributes for a single node sysdev.
16489a305230SLee Schermerhorn  * No-op if attributes already registered.
16499a305230SLee Schermerhorn  */
16509a305230SLee Schermerhorn void hugetlb_register_node(struct node *node)
16519a305230SLee Schermerhorn {
16529a305230SLee Schermerhorn 	struct hstate *h;
16539a305230SLee Schermerhorn 	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
16549a305230SLee Schermerhorn 	int err;
16559a305230SLee Schermerhorn 
16569a305230SLee Schermerhorn 	if (nhs->hugepages_kobj)
16579a305230SLee Schermerhorn 		return;		/* already allocated */
16589a305230SLee Schermerhorn 
16599a305230SLee Schermerhorn 	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
16609a305230SLee Schermerhorn 							&node->sysdev.kobj);
16619a305230SLee Schermerhorn 	if (!nhs->hugepages_kobj)
16629a305230SLee Schermerhorn 		return;
16639a305230SLee Schermerhorn 
16649a305230SLee Schermerhorn 	for_each_hstate(h) {
16659a305230SLee Schermerhorn 		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
16669a305230SLee Schermerhorn 						nhs->hstate_kobjs,
16679a305230SLee Schermerhorn 						&per_node_hstate_attr_group);
16689a305230SLee Schermerhorn 		if (err) {
16699a305230SLee Schermerhorn 			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
16709a305230SLee Schermerhorn 					" for node %d\n",
16719a305230SLee Schermerhorn 						h->name, node->sysdev.id);
16729a305230SLee Schermerhorn 			hugetlb_unregister_node(node);
16739a305230SLee Schermerhorn 			break;
16749a305230SLee Schermerhorn 		}
16759a305230SLee Schermerhorn 	}
16769a305230SLee Schermerhorn }
16779a305230SLee Schermerhorn 
16789a305230SLee Schermerhorn /*
16799b5e5d0fSLee Schermerhorn  * hugetlb init time:  register hstate attributes for all registered node
16809b5e5d0fSLee Schermerhorn  * sysdevs of nodes that have memory.  All on-line nodes should have
16819b5e5d0fSLee Schermerhorn  * registered their associated sysdev by this time.
16829a305230SLee Schermerhorn  */
16839a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void)
16849a305230SLee Schermerhorn {
16859a305230SLee Schermerhorn 	int nid;
16869a305230SLee Schermerhorn 
16879b5e5d0fSLee Schermerhorn 	for_each_node_state(nid, N_HIGH_MEMORY) {
16889a305230SLee Schermerhorn 		struct node *node = &node_devices[nid];
16899a305230SLee Schermerhorn 		if (node->sysdev.id == nid)
16909a305230SLee Schermerhorn 			hugetlb_register_node(node);
16919a305230SLee Schermerhorn 	}
16929a305230SLee Schermerhorn 
16939a305230SLee Schermerhorn 	/*
16949a305230SLee Schermerhorn 	 * Let the node sysdev driver know we're here so it can
16959a305230SLee Schermerhorn 	 * [un]register hstate attributes on node hotplug.
16969a305230SLee Schermerhorn 	 */
16979a305230SLee Schermerhorn 	register_hugetlbfs_with_node(hugetlb_register_node,
16989a305230SLee Schermerhorn 				     hugetlb_unregister_node);
16999a305230SLee Schermerhorn }
17009a305230SLee Schermerhorn #else	/* !CONFIG_NUMA */
17019a305230SLee Schermerhorn 
17029a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
17039a305230SLee Schermerhorn {
17049a305230SLee Schermerhorn 	BUG();
17059a305230SLee Schermerhorn 	if (nidp)
17069a305230SLee Schermerhorn 		*nidp = -1;
17079a305230SLee Schermerhorn 	return NULL;
17089a305230SLee Schermerhorn }
17099a305230SLee Schermerhorn 
17109a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) { }
17119a305230SLee Schermerhorn 
17129a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { }
17139a305230SLee Schermerhorn 
17149a305230SLee Schermerhorn #endif
17159a305230SLee Schermerhorn 
1716a3437870SNishanth Aravamudan static void __exit hugetlb_exit(void)
1717a3437870SNishanth Aravamudan {
1718a3437870SNishanth Aravamudan 	struct hstate *h;
1719a3437870SNishanth Aravamudan 
17209a305230SLee Schermerhorn 	hugetlb_unregister_all_nodes();
17219a305230SLee Schermerhorn 
1722a3437870SNishanth Aravamudan 	for_each_hstate(h) {
1723a3437870SNishanth Aravamudan 		kobject_put(hstate_kobjs[h - hstates]);
1724a3437870SNishanth Aravamudan 	}
1725a3437870SNishanth Aravamudan 
1726a3437870SNishanth Aravamudan 	kobject_put(hugepages_kobj);
1727a3437870SNishanth Aravamudan }
1728a3437870SNishanth Aravamudan module_exit(hugetlb_exit);
1729a3437870SNishanth Aravamudan 
1730a3437870SNishanth Aravamudan static int __init hugetlb_init(void)
1731a3437870SNishanth Aravamudan {
17320ef89d25SBenjamin Herrenschmidt 	/* Some platforms decide whether they support huge pages at boot
17330ef89d25SBenjamin Herrenschmidt 	 * time. On those (powerpc, for example), HPAGE_SHIFT is set to 0
17340ef89d25SBenjamin Herrenschmidt 	 * when there is no such support.
17350ef89d25SBenjamin Herrenschmidt 	 */
17360ef89d25SBenjamin Herrenschmidt 	if (HPAGE_SHIFT == 0)
17370ef89d25SBenjamin Herrenschmidt 		return 0;
1738a3437870SNishanth Aravamudan 
1739e11bfbfcSNick Piggin 	if (!size_to_hstate(default_hstate_size)) {
1740e11bfbfcSNick Piggin 		default_hstate_size = HPAGE_SIZE;
1741e11bfbfcSNick Piggin 		if (!size_to_hstate(default_hstate_size))
1742a3437870SNishanth Aravamudan 			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1743a3437870SNishanth Aravamudan 	}
1744e11bfbfcSNick Piggin 	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1745e11bfbfcSNick Piggin 	if (default_hstate_max_huge_pages)
1746e11bfbfcSNick Piggin 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1747a3437870SNishanth Aravamudan 
1748a3437870SNishanth Aravamudan 	hugetlb_init_hstates();
1749a3437870SNishanth Aravamudan 
1750aa888a74SAndi Kleen 	gather_bootmem_prealloc();
1751aa888a74SAndi Kleen 
1752a3437870SNishanth Aravamudan 	report_hugepages();
1753a3437870SNishanth Aravamudan 
1754a3437870SNishanth Aravamudan 	hugetlb_sysfs_init();
1755a3437870SNishanth Aravamudan 
17569a305230SLee Schermerhorn 	hugetlb_register_all_nodes();
17579a305230SLee Schermerhorn 
1758a3437870SNishanth Aravamudan 	return 0;
1759a3437870SNishanth Aravamudan }
1760a3437870SNishanth Aravamudan module_init(hugetlb_init);
1761a3437870SNishanth Aravamudan 
1762a3437870SNishanth Aravamudan /* Called when processing a hugepagesz=... command line option */
1763a3437870SNishanth Aravamudan void __init hugetlb_add_hstate(unsigned order)
1764a3437870SNishanth Aravamudan {
1765a3437870SNishanth Aravamudan 	struct hstate *h;
17668faa8b07SAndi Kleen 	unsigned long i;
17678faa8b07SAndi Kleen 
1768a3437870SNishanth Aravamudan 	if (size_to_hstate(PAGE_SIZE << order)) {
1769a3437870SNishanth Aravamudan 		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1770a3437870SNishanth Aravamudan 		return;
1771a3437870SNishanth Aravamudan 	}
1772a3437870SNishanth Aravamudan 	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1773a3437870SNishanth Aravamudan 	BUG_ON(order == 0);
1774a3437870SNishanth Aravamudan 	h = &hstates[max_hstate++];
1775a3437870SNishanth Aravamudan 	h->order = order;
1776a3437870SNishanth Aravamudan 	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
17778faa8b07SAndi Kleen 	h->nr_huge_pages = 0;
17788faa8b07SAndi Kleen 	h->free_huge_pages = 0;
17798faa8b07SAndi Kleen 	for (i = 0; i < MAX_NUMNODES; ++i)
17808faa8b07SAndi Kleen 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
17819b5e5d0fSLee Schermerhorn 	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
17829b5e5d0fSLee Schermerhorn 	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1783a3437870SNishanth Aravamudan 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1784a3437870SNishanth Aravamudan 					huge_page_size(h)/1024);
17858faa8b07SAndi Kleen 
1786a3437870SNishanth Aravamudan 	parsed_hstate = h;
1787a3437870SNishanth Aravamudan }
1788a3437870SNishanth Aravamudan 
1789e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s)
1790a3437870SNishanth Aravamudan {
1791a3437870SNishanth Aravamudan 	unsigned long *mhp;
17928faa8b07SAndi Kleen 	static unsigned long *last_mhp;
1793a3437870SNishanth Aravamudan 
1794a3437870SNishanth Aravamudan 	/*
1795a3437870SNishanth Aravamudan 	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1796a3437870SNishanth Aravamudan 	 * so this hugepages= parameter goes to the "default hstate".
1797a3437870SNishanth Aravamudan 	 */
1798a3437870SNishanth Aravamudan 	if (!max_hstate)
1799a3437870SNishanth Aravamudan 		mhp = &default_hstate_max_huge_pages;
1800a3437870SNishanth Aravamudan 	else
1801a3437870SNishanth Aravamudan 		mhp = &parsed_hstate->max_huge_pages;
1802a3437870SNishanth Aravamudan 
18038faa8b07SAndi Kleen 	if (mhp == last_mhp) {
18048faa8b07SAndi Kleen 		printk(KERN_WARNING "hugepages= specified twice without "
18058faa8b07SAndi Kleen 			"interleaving hugepagesz=, ignoring\n");
18068faa8b07SAndi Kleen 		return 1;
18078faa8b07SAndi Kleen 	}
18088faa8b07SAndi Kleen 
1809a3437870SNishanth Aravamudan 	if (sscanf(s, "%lu", mhp) <= 0)
1810a3437870SNishanth Aravamudan 		*mhp = 0;
1811a3437870SNishanth Aravamudan 
18128faa8b07SAndi Kleen 	/*
18138faa8b07SAndi Kleen 	 * Global state is always initialized later in hugetlb_init.
18148faa8b07SAndi Kleen 	 * But pages for hstates of order >= MAX_ORDER must be allocated here,
18158faa8b07SAndi Kleen 	 * early enough to still use the bootmem allocator.
18168faa8b07SAndi Kleen 	 */
18178faa8b07SAndi Kleen 	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
18188faa8b07SAndi Kleen 		hugetlb_hstate_alloc_pages(parsed_hstate);
18198faa8b07SAndi Kleen 
18208faa8b07SAndi Kleen 	last_mhp = mhp;
18218faa8b07SAndi Kleen 
1822a3437870SNishanth Aravamudan 	return 1;
1823a3437870SNishanth Aravamudan }
1824e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup);
1825e11bfbfcSNick Piggin 
1826e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s)
1827e11bfbfcSNick Piggin {
1828e11bfbfcSNick Piggin 	default_hstate_size = memparse(s, &s);
1829e11bfbfcSNick Piggin 	return 1;
1830e11bfbfcSNick Piggin }
1831e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup);
1832a3437870SNishanth Aravamudan 
18338a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array)
18348a213460SNishanth Aravamudan {
18358a213460SNishanth Aravamudan 	int node;
18368a213460SNishanth Aravamudan 	unsigned int nr = 0;
18378a213460SNishanth Aravamudan 
18388a213460SNishanth Aravamudan 	for_each_node_mask(node, cpuset_current_mems_allowed)
18398a213460SNishanth Aravamudan 		nr += array[node];
18408a213460SNishanth Aravamudan 
18418a213460SNishanth Aravamudan 	return nr;
18428a213460SNishanth Aravamudan }
18438a213460SNishanth Aravamudan 
18448a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL
184506808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
184606808b08SLee Schermerhorn 			 struct ctl_table *table, int write,
184706808b08SLee Schermerhorn 			 void __user *buffer, size_t *length, loff_t *ppos)
18481da177e4SLinus Torvalds {
1849e5ff2159SAndi Kleen 	struct hstate *h = &default_hstate;
1850e5ff2159SAndi Kleen 	unsigned long tmp;
1851e5ff2159SAndi Kleen 
1852e5ff2159SAndi Kleen 	if (!write)
1853e5ff2159SAndi Kleen 		tmp = h->max_huge_pages;
1854e5ff2159SAndi Kleen 
1855e5ff2159SAndi Kleen 	table->data = &tmp;
1856e5ff2159SAndi Kleen 	table->maxlen = sizeof(unsigned long);
18578d65af78SAlexey Dobriyan 	proc_doulongvec_minmax(table, write, buffer, length, ppos);
1858e5ff2159SAndi Kleen 
185906808b08SLee Schermerhorn 	if (write) {
1860bad44b5bSDavid Rientjes 		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1861bad44b5bSDavid Rientjes 						GFP_KERNEL | __GFP_NORETRY);
186206808b08SLee Schermerhorn 		if (!(obey_mempolicy &&
186306808b08SLee Schermerhorn 			       init_nodemask_of_mempolicy(nodes_allowed))) {
186406808b08SLee Schermerhorn 			NODEMASK_FREE(nodes_allowed);
186506808b08SLee Schermerhorn 			nodes_allowed = &node_states[N_HIGH_MEMORY];
186606808b08SLee Schermerhorn 		}
186706808b08SLee Schermerhorn 		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
186806808b08SLee Schermerhorn 
186906808b08SLee Schermerhorn 		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
187006808b08SLee Schermerhorn 			NODEMASK_FREE(nodes_allowed);
187106808b08SLee Schermerhorn 	}
1872e5ff2159SAndi Kleen 
18731da177e4SLinus Torvalds 	return 0;
18741da177e4SLinus Torvalds }
1875396faf03SMel Gorman 
187606808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write,
187706808b08SLee Schermerhorn 			  void __user *buffer, size_t *length, loff_t *ppos)
187806808b08SLee Schermerhorn {
187906808b08SLee Schermerhorn 
188006808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(false, table, write,
188106808b08SLee Schermerhorn 							buffer, length, ppos);
188206808b08SLee Schermerhorn }
188306808b08SLee Schermerhorn 
188406808b08SLee Schermerhorn #ifdef CONFIG_NUMA
188506808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
188606808b08SLee Schermerhorn 			  void __user *buffer, size_t *length, loff_t *ppos)
188706808b08SLee Schermerhorn {
188806808b08SLee Schermerhorn 	return hugetlb_sysctl_handler_common(true, table, write,
188906808b08SLee Schermerhorn 							buffer, length, ppos);
189006808b08SLee Schermerhorn }
189106808b08SLee Schermerhorn #endif /* CONFIG_NUMA */
189206808b08SLee Schermerhorn 
1893396faf03SMel Gorman int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
18948d65af78SAlexey Dobriyan 			void __user *buffer,
1895396faf03SMel Gorman 			size_t *length, loff_t *ppos)
1896396faf03SMel Gorman {
18978d65af78SAlexey Dobriyan 	proc_dointvec(table, write, buffer, length, ppos);
1898396faf03SMel Gorman 	if (hugepages_treat_as_movable)
1899396faf03SMel Gorman 		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1900396faf03SMel Gorman 	else
1901396faf03SMel Gorman 		htlb_alloc_mask = GFP_HIGHUSER;
1902396faf03SMel Gorman 	return 0;
1903396faf03SMel Gorman }
1904396faf03SMel Gorman 
1905a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write,
19068d65af78SAlexey Dobriyan 			void __user *buffer,
1907a3d0c6aaSNishanth Aravamudan 			size_t *length, loff_t *ppos)
1908a3d0c6aaSNishanth Aravamudan {
1909a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
1910e5ff2159SAndi Kleen 	unsigned long tmp;
1911e5ff2159SAndi Kleen 
1912e5ff2159SAndi Kleen 	if (!write)
1913e5ff2159SAndi Kleen 		tmp = h->nr_overcommit_huge_pages;
1914e5ff2159SAndi Kleen 
1915e5ff2159SAndi Kleen 	table->data = &tmp;
1916e5ff2159SAndi Kleen 	table->maxlen = sizeof(unsigned long);
19178d65af78SAlexey Dobriyan 	proc_doulongvec_minmax(table, write, buffer, length, ppos);
1918e5ff2159SAndi Kleen 
1919e5ff2159SAndi Kleen 	if (write) {
1920064d9efeSNishanth Aravamudan 		spin_lock(&hugetlb_lock);
1921e5ff2159SAndi Kleen 		h->nr_overcommit_huge_pages = tmp;
1922a3d0c6aaSNishanth Aravamudan 		spin_unlock(&hugetlb_lock);
1923e5ff2159SAndi Kleen 	}
1924e5ff2159SAndi Kleen 
1925a3d0c6aaSNishanth Aravamudan 	return 0;
1926a3d0c6aaSNishanth Aravamudan }
1927a3d0c6aaSNishanth Aravamudan 
19281da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */
19291da177e4SLinus Torvalds 
1930e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m)
19311da177e4SLinus Torvalds {
1932a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
1933e1759c21SAlexey Dobriyan 	seq_printf(m,
19341da177e4SLinus Torvalds 			"HugePages_Total:   %5lu\n"
19351da177e4SLinus Torvalds 			"HugePages_Free:    %5lu\n"
1936b45b5bd6SDavid Gibson 			"HugePages_Rsvd:    %5lu\n"
19377893d1d5SAdam Litke 			"HugePages_Surp:    %5lu\n"
19384f98a2feSRik van Riel 			"Hugepagesize:   %8lu kB\n",
1939a5516438SAndi Kleen 			h->nr_huge_pages,
1940a5516438SAndi Kleen 			h->free_huge_pages,
1941a5516438SAndi Kleen 			h->resv_huge_pages,
1942a5516438SAndi Kleen 			h->surplus_huge_pages,
1943a5516438SAndi Kleen 			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
19441da177e4SLinus Torvalds }
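/*
 * hugetlb_report_meminfo() supplies the hugetlb block of /proc/meminfo.
 * With the default 2 MB hstate and a few pages configured it would read
 * roughly as follows (all numbers purely illustrative):
 *
 *	HugePages_Total:      16
 *	HugePages_Free:       12
 *	HugePages_Rsvd:        2
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */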
19451da177e4SLinus Torvalds 
19461da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf)
19471da177e4SLinus Torvalds {
1948a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
19491da177e4SLinus Torvalds 	return sprintf(buf,
19501da177e4SLinus Torvalds 		"Node %d HugePages_Total: %5u\n"
1951a1de0919SNishanth Aravamudan 		"Node %d HugePages_Free:  %5u\n"
1952a1de0919SNishanth Aravamudan 		"Node %d HugePages_Surp:  %5u\n",
1953a5516438SAndi Kleen 		nid, h->nr_huge_pages_node[nid],
1954a5516438SAndi Kleen 		nid, h->free_huge_pages_node[nid],
1955a5516438SAndi Kleen 		nid, h->surplus_huge_pages_node[nid]);
19561da177e4SLinus Torvalds }
19571da177e4SLinus Torvalds 
19581da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
19591da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void)
19601da177e4SLinus Torvalds {
1961a5516438SAndi Kleen 	struct hstate *h = &default_hstate;
1962a5516438SAndi Kleen 	return h->nr_huge_pages * pages_per_huge_page(h);
19631da177e4SLinus Torvalds }
19641da177e4SLinus Torvalds 
1965a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta)
1966fc1b8a73SMel Gorman {
1967fc1b8a73SMel Gorman 	int ret = -ENOMEM;
1968fc1b8a73SMel Gorman 
1969fc1b8a73SMel Gorman 	spin_lock(&hugetlb_lock);
1970fc1b8a73SMel Gorman 	/*
1971fc1b8a73SMel Gorman 	 * When cpuset is configured, it breaks the strict hugetlb page
1972fc1b8a73SMel Gorman 	 * reservation as the accounting is done on a global variable. Such
1973fc1b8a73SMel Gorman 	 * reservation is completely rubbish in the presence of cpuset because
1974fc1b8a73SMel Gorman 	 * the reservation is not checked against page availability for the
1975fc1b8a73SMel Gorman 	 * current cpuset. An application can still be OOM'ed by the kernel
1976fc1b8a73SMel Gorman 	 * for lack of a free hugetlb page in the cpuset the task runs in.
1977fc1b8a73SMel Gorman 	 * Attempting to enforce strict accounting with cpuset is almost
1978fc1b8a73SMel Gorman 	 * impossible (or too ugly) because cpuset is so fluid that
1979fc1b8a73SMel Gorman 	 * tasks or memory nodes can be dynamically moved between cpusets.
1980fc1b8a73SMel Gorman 	 *
1981fc1b8a73SMel Gorman 	 * The change of semantics for shared hugetlb mapping with cpuset is
1982fc1b8a73SMel Gorman 	 * undesirable. However, in order to preserve some of the semantics,
1983fc1b8a73SMel Gorman 	 * we fall back to check against current free page availability as
1984fc1b8a73SMel Gorman 	 * a best attempt and hopefully to minimize the impact of changing
1985fc1b8a73SMel Gorman 	 * semantics that cpuset has.
1986fc1b8a73SMel Gorman 	 */
1987fc1b8a73SMel Gorman 	if (delta > 0) {
1988a5516438SAndi Kleen 		if (gather_surplus_pages(h, delta) < 0)
1989fc1b8a73SMel Gorman 			goto out;
1990fc1b8a73SMel Gorman 
1991a5516438SAndi Kleen 		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
1992a5516438SAndi Kleen 			return_unused_surplus_pages(h, delta);
1993fc1b8a73SMel Gorman 			goto out;
1994fc1b8a73SMel Gorman 		}
1995fc1b8a73SMel Gorman 	}
1996fc1b8a73SMel Gorman 
1997fc1b8a73SMel Gorman 	ret = 0;
1998fc1b8a73SMel Gorman 	if (delta < 0)
1999a5516438SAndi Kleen 		return_unused_surplus_pages(h, (unsigned long) -delta);
2000fc1b8a73SMel Gorman 
2001fc1b8a73SMel Gorman out:
2002fc1b8a73SMel Gorman 	spin_unlock(&hugetlb_lock);
2003fc1b8a73SMel Gorman 	return ret;
2004fc1b8a73SMel Gorman }
2005fc1b8a73SMel Gorman 
200684afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma)
200784afd99bSAndy Whitcroft {
200884afd99bSAndy Whitcroft 	struct resv_map *reservations = vma_resv_map(vma);
200984afd99bSAndy Whitcroft 
201084afd99bSAndy Whitcroft 	/*
201184afd99bSAndy Whitcroft 	 * This new VMA should share its siblings reservation map if present.
201284afd99bSAndy Whitcroft 	 * The VMA will only ever have a valid reservation map pointer where
201384afd99bSAndy Whitcroft 	 * it is being copied for another still existing VMA.  As that VMA
201484afd99bSAndy Whitcroft 	 * has a reference to the reservation map, it cannot disappear until
201584afd99bSAndy Whitcroft 	 * after this open call completes.  It is therefore safe to take a
201684afd99bSAndy Whitcroft 	 * new reference here without additional locking.
201784afd99bSAndy Whitcroft 	 */
201884afd99bSAndy Whitcroft 	if (reservations)
201984afd99bSAndy Whitcroft 		kref_get(&reservations->refs);
202084afd99bSAndy Whitcroft }
202184afd99bSAndy Whitcroft 
2022a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2023a1e78772SMel Gorman {
2024a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
202584afd99bSAndy Whitcroft 	struct resv_map *reservations = vma_resv_map(vma);
202684afd99bSAndy Whitcroft 	unsigned long reserve;
202784afd99bSAndy Whitcroft 	unsigned long start;
202884afd99bSAndy Whitcroft 	unsigned long end;
202984afd99bSAndy Whitcroft 
203084afd99bSAndy Whitcroft 	if (reservations) {
2031a5516438SAndi Kleen 		start = vma_hugecache_offset(h, vma, vma->vm_start);
2032a5516438SAndi Kleen 		end = vma_hugecache_offset(h, vma, vma->vm_end);
203384afd99bSAndy Whitcroft 
203484afd99bSAndy Whitcroft 		reserve = (end - start) -
203584afd99bSAndy Whitcroft 			region_count(&reservations->regions, start, end);
203684afd99bSAndy Whitcroft 
203784afd99bSAndy Whitcroft 		kref_put(&reservations->refs, resv_map_release);
203884afd99bSAndy Whitcroft 
20397251ff78SAdam Litke 		if (reserve) {
2040a5516438SAndi Kleen 			hugetlb_acct_memory(h, -reserve);
20417251ff78SAdam Litke 			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
20427251ff78SAdam Litke 		}
2043a1e78772SMel Gorman 	}
204484afd99bSAndy Whitcroft }
2045a1e78772SMel Gorman 
20461da177e4SLinus Torvalds /*
20471da177e4SLinus Torvalds  * We cannot handle pagefaults against hugetlb pages at all.  They cause
20481da177e4SLinus Torvalds  * handle_mm_fault() to try to instantiate regular-sized pages in the
20491da177e4SLinus Torvalds  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
20501da177e4SLinus Torvalds  * this far.
20511da177e4SLinus Torvalds  */
2052d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
20531da177e4SLinus Torvalds {
20541da177e4SLinus Torvalds 	BUG();
2055d0217ac0SNick Piggin 	return 0;
20561da177e4SLinus Torvalds }
20571da177e4SLinus Torvalds 
2058f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = {
2059d0217ac0SNick Piggin 	.fault = hugetlb_vm_op_fault,
206084afd99bSAndy Whitcroft 	.open = hugetlb_vm_op_open,
2061a1e78772SMel Gorman 	.close = hugetlb_vm_op_close,
20621da177e4SLinus Torvalds };
20631da177e4SLinus Torvalds 
20641e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
20651e8f889bSDavid Gibson 				int writable)
206663551ae0SDavid Gibson {
206763551ae0SDavid Gibson 	pte_t entry;
206863551ae0SDavid Gibson 
20691e8f889bSDavid Gibson 	if (writable) {
207063551ae0SDavid Gibson 		entry =
207163551ae0SDavid Gibson 		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
207263551ae0SDavid Gibson 	} else {
20737f2e9525SGerald Schaefer 		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
207463551ae0SDavid Gibson 	}
207563551ae0SDavid Gibson 	entry = pte_mkyoung(entry);
207663551ae0SDavid Gibson 	entry = pte_mkhuge(entry);
207763551ae0SDavid Gibson 
207863551ae0SDavid Gibson 	return entry;
207963551ae0SDavid Gibson }
208063551ae0SDavid Gibson 
20811e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma,
20821e8f889bSDavid Gibson 				   unsigned long address, pte_t *ptep)
20831e8f889bSDavid Gibson {
20841e8f889bSDavid Gibson 	pte_t entry;
20851e8f889bSDavid Gibson 
20867f2e9525SGerald Schaefer 	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
20877f2e9525SGerald Schaefer 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
20881e8f889bSDavid Gibson 		update_mmu_cache(vma, address, entry);
20891e8f889bSDavid Gibson 	}
20908dab5241SBenjamin Herrenschmidt }
20911e8f889bSDavid Gibson 
20921e8f889bSDavid Gibson 
209363551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
209463551ae0SDavid Gibson 			    struct vm_area_struct *vma)
209563551ae0SDavid Gibson {
209663551ae0SDavid Gibson 	pte_t *src_pte, *dst_pte, entry;
209763551ae0SDavid Gibson 	struct page *ptepage;
20981c59827dSHugh Dickins 	unsigned long addr;
20991e8f889bSDavid Gibson 	int cow;
2100a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
2101a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
21021e8f889bSDavid Gibson 
21031e8f889bSDavid Gibson 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
210463551ae0SDavid Gibson 
2105a5516438SAndi Kleen 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2106c74df32cSHugh Dickins 		src_pte = huge_pte_offset(src, addr);
2107c74df32cSHugh Dickins 		if (!src_pte)
2108c74df32cSHugh Dickins 			continue;
2109a5516438SAndi Kleen 		dst_pte = huge_pte_alloc(dst, addr, sz);
211063551ae0SDavid Gibson 		if (!dst_pte)
211163551ae0SDavid Gibson 			goto nomem;
2112c5c99429SLarry Woodman 
2113c5c99429SLarry Woodman 		/* If the pagetables are shared don't copy or take references */
2114c5c99429SLarry Woodman 		if (dst_pte == src_pte)
2115c5c99429SLarry Woodman 			continue;
2116c5c99429SLarry Woodman 
2117c74df32cSHugh Dickins 		spin_lock(&dst->page_table_lock);
211846478758SNick Piggin 		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
21197f2e9525SGerald Schaefer 		if (!huge_pte_none(huge_ptep_get(src_pte))) {
21201e8f889bSDavid Gibson 			if (cow)
21217f2e9525SGerald Schaefer 				huge_ptep_set_wrprotect(src, addr, src_pte);
21227f2e9525SGerald Schaefer 			entry = huge_ptep_get(src_pte);
212363551ae0SDavid Gibson 			ptepage = pte_page(entry);
212463551ae0SDavid Gibson 			get_page(ptepage);
212563551ae0SDavid Gibson 			set_huge_pte_at(dst, addr, dst_pte, entry);
21261c59827dSHugh Dickins 		}
21271c59827dSHugh Dickins 		spin_unlock(&src->page_table_lock);
2128c74df32cSHugh Dickins 		spin_unlock(&dst->page_table_lock);
212963551ae0SDavid Gibson 	}
213063551ae0SDavid Gibson 	return 0;
213163551ae0SDavid Gibson 
213263551ae0SDavid Gibson nomem:
213363551ae0SDavid Gibson 	return -ENOMEM;
213463551ae0SDavid Gibson }
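/*
 * copy_hugetlb_page_range() is the fork()-time counterpart of
 * copy_page_range() for hugetlb VMAs: the child receives page table
 * entries that share the parent's huge pages, and for private writable
 * mappings (cow != 0) the parent's PTEs are write-protected so that a
 * later write triggers copy-on-write handling in hugetlb_cow().
 */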
213563551ae0SDavid Gibson 
2136502717f4SChen, Kenneth W void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
213704f2cbe3SMel Gorman 			    unsigned long end, struct page *ref_page)
213863551ae0SDavid Gibson {
213963551ae0SDavid Gibson 	struct mm_struct *mm = vma->vm_mm;
214063551ae0SDavid Gibson 	unsigned long address;
2141c7546f8fSDavid Gibson 	pte_t *ptep;
214263551ae0SDavid Gibson 	pte_t pte;
214363551ae0SDavid Gibson 	struct page *page;
2144fe1668aeSChen, Kenneth W 	struct page *tmp;
2145a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
2146a5516438SAndi Kleen 	unsigned long sz = huge_page_size(h);
2147a5516438SAndi Kleen 
2148c0a499c2SChen, Kenneth W 	/*
2149c0a499c2SChen, Kenneth W 	 * A page gathering list, protected by the per-file i_mmap_lock. The
2150c0a499c2SChen, Kenneth W 	 * lock is used to avoid list corruption from multiple unmappings
2151c0a499c2SChen, Kenneth W 	 * of the same page, since we are using page->lru.
2152c0a499c2SChen, Kenneth W 	 */
2153fe1668aeSChen, Kenneth W 	LIST_HEAD(page_list);
215463551ae0SDavid Gibson 
215563551ae0SDavid Gibson 	WARN_ON(!is_vm_hugetlb_page(vma));
2156a5516438SAndi Kleen 	BUG_ON(start & ~huge_page_mask(h));
2157a5516438SAndi Kleen 	BUG_ON(end & ~huge_page_mask(h));
215863551ae0SDavid Gibson 
2159cddb8a5cSAndrea Arcangeli 	mmu_notifier_invalidate_range_start(mm, start, end);
2160508034a3SHugh Dickins 	spin_lock(&mm->page_table_lock);
2161a5516438SAndi Kleen 	for (address = start; address < end; address += sz) {
2162c7546f8fSDavid Gibson 		ptep = huge_pte_offset(mm, address);
2163c7546f8fSDavid Gibson 		if (!ptep)
2164c7546f8fSDavid Gibson 			continue;
2165c7546f8fSDavid Gibson 
216639dde65cSChen, Kenneth W 		if (huge_pmd_unshare(mm, &address, ptep))
216739dde65cSChen, Kenneth W 			continue;
216839dde65cSChen, Kenneth W 
216904f2cbe3SMel Gorman 		/*
217004f2cbe3SMel Gorman 		 * If a reference page is supplied, it is because a specific
217104f2cbe3SMel Gorman 		 * page is being unmapped, not a range. Ensure the page we
217204f2cbe3SMel Gorman 		 * are about to unmap is the actual page of interest.
217304f2cbe3SMel Gorman 		 */
217404f2cbe3SMel Gorman 		if (ref_page) {
217504f2cbe3SMel Gorman 			pte = huge_ptep_get(ptep);
217604f2cbe3SMel Gorman 			if (huge_pte_none(pte))
217704f2cbe3SMel Gorman 				continue;
217804f2cbe3SMel Gorman 			page = pte_page(pte);
217904f2cbe3SMel Gorman 			if (page != ref_page)
218004f2cbe3SMel Gorman 				continue;
218104f2cbe3SMel Gorman 
218204f2cbe3SMel Gorman 			/*
218304f2cbe3SMel Gorman 			 * Mark the VMA as having unmapped its page so that
218404f2cbe3SMel Gorman 			 * future faults in this VMA will fail rather than
218504f2cbe3SMel Gorman 			 * looking like data was lost
218604f2cbe3SMel Gorman 			 */
218704f2cbe3SMel Gorman 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
218804f2cbe3SMel Gorman 		}
218904f2cbe3SMel Gorman 
2190c7546f8fSDavid Gibson 		pte = huge_ptep_get_and_clear(mm, address, ptep);
21917f2e9525SGerald Schaefer 		if (huge_pte_none(pte))
219263551ae0SDavid Gibson 			continue;
2193c7546f8fSDavid Gibson 
219463551ae0SDavid Gibson 		page = pte_page(pte);
21956649a386SKen Chen 		if (pte_dirty(pte))
21966649a386SKen Chen 			set_page_dirty(page);
2197fe1668aeSChen, Kenneth W 		list_add(&page->lru, &page_list);
219863551ae0SDavid Gibson 	}
21991da177e4SLinus Torvalds 	spin_unlock(&mm->page_table_lock);
2200508034a3SHugh Dickins 	flush_tlb_range(vma, start, end);
2201cddb8a5cSAndrea Arcangeli 	mmu_notifier_invalidate_range_end(mm, start, end);
2202fe1668aeSChen, Kenneth W 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
2203fe1668aeSChen, Kenneth W 		list_del(&page->lru);
2204fe1668aeSChen, Kenneth W 		put_page(page);
2205fe1668aeSChen, Kenneth W 	}
22061da177e4SLinus Torvalds }
220763551ae0SDavid Gibson 
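/*
 * Locked wrapper around __unmap_hugepage_range(). Taking the per-file
 * i_mmap_lock here prevents concurrent unmaps of the same pages from
 * corrupting the shared page->lru links used by the gathering list above.
 */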
2208502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
220904f2cbe3SMel Gorman 			  unsigned long end, struct page *ref_page)
2210502717f4SChen, Kenneth W {
2211502717f4SChen, Kenneth W 	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
221204f2cbe3SMel Gorman 	__unmap_hugepage_range(vma, start, end, ref_page);
2213502717f4SChen, Kenneth W 	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2214502717f4SChen, Kenneth W }
2215502717f4SChen, Kenneth W 
221604f2cbe3SMel Gorman /*
221704f2cbe3SMel Gorman  * This is called when the original mapper is failing to COW a MAP_PRIVATE
221804f2cbe3SMel Gorman  * mapping it owns the reserve page for. The intention is to unmap the page
221904f2cbe3SMel Gorman  * from other VMAs and let the children be SIGKILLed if they are faulting the
222004f2cbe3SMel Gorman  * same region.
222104f2cbe3SMel Gorman  */
22222a4b3dedSHarvey Harrison static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
22232a4b3dedSHarvey Harrison 				struct page *page, unsigned long address)
222404f2cbe3SMel Gorman {
22257526674dSAdam Litke 	struct hstate *h = hstate_vma(vma);
222604f2cbe3SMel Gorman 	struct vm_area_struct *iter_vma;
222704f2cbe3SMel Gorman 	struct address_space *mapping;
222804f2cbe3SMel Gorman 	struct prio_tree_iter iter;
222904f2cbe3SMel Gorman 	pgoff_t pgoff;
223004f2cbe3SMel Gorman 
223104f2cbe3SMel Gorman 	/*
223204f2cbe3SMel Gorman 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
223304f2cbe3SMel Gorman 	 * from page cache lookup which is in HPAGE_SIZE units.
223404f2cbe3SMel Gorman 	 */
22357526674dSAdam Litke 	address = address & huge_page_mask(h);
223604f2cbe3SMel Gorman 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
223704f2cbe3SMel Gorman 		+ (vma->vm_pgoff >> PAGE_SHIFT);
223804f2cbe3SMel Gorman 	mapping = (struct address_space *)page_private(page);
223904f2cbe3SMel Gorman 
224004f2cbe3SMel Gorman 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
224104f2cbe3SMel Gorman 		/* Do not unmap the current VMA */
224204f2cbe3SMel Gorman 		if (iter_vma == vma)
224304f2cbe3SMel Gorman 			continue;
224404f2cbe3SMel Gorman 
224504f2cbe3SMel Gorman 		/*
224604f2cbe3SMel Gorman 		 * Unmap the page from other VMAs without their own reserves.
224704f2cbe3SMel Gorman 		 * They get marked to be SIGKILLed if they fault in these
224804f2cbe3SMel Gorman 		 * areas. This is because a future no-page fault on this VMA
224904f2cbe3SMel Gorman 		 * could insert a zeroed page instead of the data existing
225004f2cbe3SMel Gorman 		 * from the time of fork. This would look like data corruption
225104f2cbe3SMel Gorman 		 * from the time of fork. This would look like data corruption.
225204f2cbe3SMel Gorman 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
225304f2cbe3SMel Gorman 			unmap_hugepage_range(iter_vma,
22547526674dSAdam Litke 				address, address + huge_page_size(h),
225504f2cbe3SMel Gorman 				page);
225604f2cbe3SMel Gorman 	}
225704f2cbe3SMel Gorman 
225804f2cbe3SMel Gorman 	return 1;
225904f2cbe3SMel Gorman }
226004f2cbe3SMel Gorman 
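/*
 * Handle a huge-page copy-on-write fault. If the faulting task holds the
 * only reference, the existing page is simply made writable. Otherwise a
 * new huge page is allocated (page_table_lock is dropped around the
 * allocation), the contents are copied, and the PTE is swapped under the
 * retaken lock. When the reserve-owning MAP_PRIVATE mapper cannot get a
 * page, outside_reserve triggers unmap_ref_private() so the owner can
 * reclaim its reserved page from child processes.
 */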
22611e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
226204f2cbe3SMel Gorman 			unsigned long address, pte_t *ptep, pte_t pte,
226304f2cbe3SMel Gorman 			struct page *pagecache_page)
22641e8f889bSDavid Gibson {
2265a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
22661e8f889bSDavid Gibson 	struct page *old_page, *new_page;
226779ac6ba4SDavid Gibson 	int avoidcopy;
226804f2cbe3SMel Gorman 	int outside_reserve = 0;
22691e8f889bSDavid Gibson 
22701e8f889bSDavid Gibson 	old_page = pte_page(pte);
22711e8f889bSDavid Gibson 
227204f2cbe3SMel Gorman retry_avoidcopy:
22731e8f889bSDavid Gibson 	/* If no one else is actually using this page, avoid the copy
22741e8f889bSDavid Gibson 	 * and just make the page writable */
22751e8f889bSDavid Gibson 	avoidcopy = (page_count(old_page) == 1);
22761e8f889bSDavid Gibson 	if (avoidcopy) {
22771e8f889bSDavid Gibson 		set_huge_ptep_writable(vma, address, ptep);
227883c54070SNick Piggin 		return 0;
22791e8f889bSDavid Gibson 	}
22801e8f889bSDavid Gibson 
228104f2cbe3SMel Gorman 	/*
228204f2cbe3SMel Gorman 	 * If the process that created a MAP_PRIVATE mapping is about to
228304f2cbe3SMel Gorman 	 * perform a COW due to a shared page count, attempt to satisfy
228404f2cbe3SMel Gorman 	 * the allocation without using the existing reserves. The pagecache
228504f2cbe3SMel Gorman 	 * page is used to determine if the reserve at this address was
228604f2cbe3SMel Gorman 	 * consumed or not. If reserves were used, a partial faulted mapping
228704f2cbe3SMel Gorman 	 * consumed or not. If reserves were used, a partially faulted mapping
228804f2cbe3SMel Gorman 	 * of the full address range.
228904f2cbe3SMel Gorman 	 */
2290f83a275dSMel Gorman 	if (!(vma->vm_flags & VM_MAYSHARE) &&
229104f2cbe3SMel Gorman 			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
229204f2cbe3SMel Gorman 			old_page != pagecache_page)
229304f2cbe3SMel Gorman 		outside_reserve = 1;
229404f2cbe3SMel Gorman 
22951e8f889bSDavid Gibson 	page_cache_get(old_page);
2296b76c8cfbSLarry Woodman 
2297b76c8cfbSLarry Woodman 	/* Drop page_table_lock as buddy allocator may be called */
2298b76c8cfbSLarry Woodman 	spin_unlock(&mm->page_table_lock);
229904f2cbe3SMel Gorman 	new_page = alloc_huge_page(vma, address, outside_reserve);
23001e8f889bSDavid Gibson 
23012fc39cecSAdam Litke 	if (IS_ERR(new_page)) {
23021e8f889bSDavid Gibson 		page_cache_release(old_page);
230304f2cbe3SMel Gorman 
230404f2cbe3SMel Gorman 		/*
230504f2cbe3SMel Gorman 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
230604f2cbe3SMel Gorman 		 * it is due to references held by a child and an insufficient
230704f2cbe3SMel Gorman 		 * huge page pool. To guarantee the original mappers
230804f2cbe3SMel Gorman 		 * huge page pool. To guarantee the original mapper's
230904f2cbe3SMel Gorman 		 * may get SIGKILLed if it later faults.
231004f2cbe3SMel Gorman 		 */
231104f2cbe3SMel Gorman 		if (outside_reserve) {
231204f2cbe3SMel Gorman 			BUG_ON(huge_pte_none(pte));
231304f2cbe3SMel Gorman 			if (unmap_ref_private(mm, vma, old_page, address)) {
231404f2cbe3SMel Gorman 				BUG_ON(page_count(old_page) != 1);
231504f2cbe3SMel Gorman 				BUG_ON(huge_pte_none(pte));
2316b76c8cfbSLarry Woodman 				spin_lock(&mm->page_table_lock);
231704f2cbe3SMel Gorman 				goto retry_avoidcopy;
231804f2cbe3SMel Gorman 			}
231904f2cbe3SMel Gorman 			WARN_ON_ONCE(1);
232004f2cbe3SMel Gorman 		}
232104f2cbe3SMel Gorman 
2322b76c8cfbSLarry Woodman 		/* Caller expects lock to be held */
2323b76c8cfbSLarry Woodman 		spin_lock(&mm->page_table_lock);
23242fc39cecSAdam Litke 		return -PTR_ERR(new_page);
23251e8f889bSDavid Gibson 	}
23261e8f889bSDavid Gibson 
23279de455b2SAtsushi Nemoto 	copy_huge_page(new_page, old_page, address, vma);
23280ed361deSNick Piggin 	__SetPageUptodate(new_page);
23291e8f889bSDavid Gibson 
2330b76c8cfbSLarry Woodman 	/*
2331b76c8cfbSLarry Woodman 	 * Retake the page_table_lock to check for racing updates
2332b76c8cfbSLarry Woodman 	 * before the page tables are altered
2333b76c8cfbSLarry Woodman 	 */
2334b76c8cfbSLarry Woodman 	spin_lock(&mm->page_table_lock);
2335a5516438SAndi Kleen 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
23367f2e9525SGerald Schaefer 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
23371e8f889bSDavid Gibson 		/* Break COW */
23388fe627ecSGerald Schaefer 		huge_ptep_clear_flush(vma, address, ptep);
23391e8f889bSDavid Gibson 		set_huge_pte_at(mm, address, ptep,
23401e8f889bSDavid Gibson 				make_huge_pte(vma, new_page, 1));
23411e8f889bSDavid Gibson 		/* Make the old page be freed below */
23421e8f889bSDavid Gibson 		new_page = old_page;
23431e8f889bSDavid Gibson 	}
23441e8f889bSDavid Gibson 	page_cache_release(new_page);
23451e8f889bSDavid Gibson 	page_cache_release(old_page);
234683c54070SNick Piggin 	return 0;
23471e8f889bSDavid Gibson }
23481e8f889bSDavid Gibson 
234904f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */
2350a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2351a5516438SAndi Kleen 			struct vm_area_struct *vma, unsigned long address)
235204f2cbe3SMel Gorman {
235304f2cbe3SMel Gorman 	struct address_space *mapping;
2354e7c4b0bfSAndy Whitcroft 	pgoff_t idx;
235504f2cbe3SMel Gorman 
235604f2cbe3SMel Gorman 	mapping = vma->vm_file->f_mapping;
2357a5516438SAndi Kleen 	idx = vma_hugecache_offset(h, vma, address);
235804f2cbe3SMel Gorman 
235904f2cbe3SMel Gorman 	return find_lock_page(mapping, idx);
236004f2cbe3SMel Gorman }
236104f2cbe3SMel Gorman 
23623ae77f43SHugh Dickins /*
23633ae77f43SHugh Dickins  * Return whether there is a pagecache page to back the given address within the VMA.
23643ae77f43SHugh Dickins  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
23653ae77f43SHugh Dickins  */
23663ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h,
23672a15efc9SHugh Dickins 			struct vm_area_struct *vma, unsigned long address)
23682a15efc9SHugh Dickins {
23692a15efc9SHugh Dickins 	struct address_space *mapping;
23702a15efc9SHugh Dickins 	pgoff_t idx;
23712a15efc9SHugh Dickins 	struct page *page;
23722a15efc9SHugh Dickins 
23732a15efc9SHugh Dickins 	mapping = vma->vm_file->f_mapping;
23742a15efc9SHugh Dickins 	idx = vma_hugecache_offset(h, vma, address);
23752a15efc9SHugh Dickins 
23762a15efc9SHugh Dickins 	page = find_get_page(mapping, idx);
23772a15efc9SHugh Dickins 	if (page)
23782a15efc9SHugh Dickins 		put_page(page);
23792a15efc9SHugh Dickins 	return page != NULL;
23802a15efc9SHugh Dickins }
23812a15efc9SHugh Dickins 
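/*
 * Fault in a huge page for an address with no PTE yet. Shared mappings
 * look the page up in, or add it to, the hugetlbfs page cache; private
 * mappings allocate and zero a fresh page. The new PTE is installed under
 * page_table_lock, with i_size re-checked to guard against a racing
 * truncate, and a write fault on a private mapping falls straight through
 * to hugetlb_cow() to avoid taking a second fault.
 */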
2382a1ed3ddaSRobert P. J. Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2383788c7df4SHugh Dickins 			unsigned long address, pte_t *ptep, unsigned int flags)
2384ac9b9c66SHugh Dickins {
2385a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
2386ac9b9c66SHugh Dickins 	int ret = VM_FAULT_SIGBUS;
2387e7c4b0bfSAndy Whitcroft 	pgoff_t idx;
23884c887265SAdam Litke 	unsigned long size;
23894c887265SAdam Litke 	struct page *page;
23904c887265SAdam Litke 	struct address_space *mapping;
23911e8f889bSDavid Gibson 	pte_t new_pte;
23924c887265SAdam Litke 
239304f2cbe3SMel Gorman 	/*
239404f2cbe3SMel Gorman 	 * Currently, we are forced to kill the process in the event the
239504f2cbe3SMel Gorman 	 * original mapper has unmapped pages from the child due to a failed
239604f2cbe3SMel Gorman 	 * COW. Warn that such a situation has occurred as it may not be obvious
239704f2cbe3SMel Gorman 	 */
239804f2cbe3SMel Gorman 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
239904f2cbe3SMel Gorman 		printk(KERN_WARNING
240004f2cbe3SMel Gorman 			"PID %d killed due to inadequate hugepage pool\n",
240104f2cbe3SMel Gorman 			current->pid);
240204f2cbe3SMel Gorman 		return ret;
240304f2cbe3SMel Gorman 	}
240404f2cbe3SMel Gorman 
24054c887265SAdam Litke 	mapping = vma->vm_file->f_mapping;
2406a5516438SAndi Kleen 	idx = vma_hugecache_offset(h, vma, address);
24074c887265SAdam Litke 
24084c887265SAdam Litke 	/*
24094c887265SAdam Litke 	 * Use page lock to guard against racing truncation
24104c887265SAdam Litke 	 * before we get page_table_lock.
24114c887265SAdam Litke 	 */
24126bda666aSChristoph Lameter retry:
24136bda666aSChristoph Lameter 	page = find_lock_page(mapping, idx);
24146bda666aSChristoph Lameter 	if (!page) {
2415a5516438SAndi Kleen 		size = i_size_read(mapping->host) >> huge_page_shift(h);
2416ebed4bfcSHugh Dickins 		if (idx >= size)
2417ebed4bfcSHugh Dickins 			goto out;
241804f2cbe3SMel Gorman 		page = alloc_huge_page(vma, address, 0);
24192fc39cecSAdam Litke 		if (IS_ERR(page)) {
24202fc39cecSAdam Litke 			ret = -PTR_ERR(page);
24216bda666aSChristoph Lameter 			goto out;
24226bda666aSChristoph Lameter 		}
2423a5516438SAndi Kleen 		clear_huge_page(page, address, huge_page_size(h));
24240ed361deSNick Piggin 		__SetPageUptodate(page);
2425ac9b9c66SHugh Dickins 
2426f83a275dSMel Gorman 		if (vma->vm_flags & VM_MAYSHARE) {
24276bda666aSChristoph Lameter 			int err;
242845c682a6SKen Chen 			struct inode *inode = mapping->host;
24296bda666aSChristoph Lameter 
24306bda666aSChristoph Lameter 			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
24316bda666aSChristoph Lameter 			if (err) {
24326bda666aSChristoph Lameter 				put_page(page);
24336bda666aSChristoph Lameter 				if (err == -EEXIST)
24346bda666aSChristoph Lameter 					goto retry;
24356bda666aSChristoph Lameter 				goto out;
24366bda666aSChristoph Lameter 			}
243745c682a6SKen Chen 
243845c682a6SKen Chen 			spin_lock(&inode->i_lock);
2439a5516438SAndi Kleen 			inode->i_blocks += blocks_per_huge_page(h);
244045c682a6SKen Chen 			spin_unlock(&inode->i_lock);
24416bda666aSChristoph Lameter 		} else
24426bda666aSChristoph Lameter 			lock_page(page);
24436bda666aSChristoph Lameter 	}
24441e8f889bSDavid Gibson 
244557303d80SAndy Whitcroft 	/*
244657303d80SAndy Whitcroft 	 * If we are going to COW a private mapping later, we examine the
244757303d80SAndy Whitcroft 	 * pending reservations for this page now. This will ensure that
244857303d80SAndy Whitcroft 	 * any allocations necessary to record that reservation occur outside
244957303d80SAndy Whitcroft 	 * the spinlock.
245057303d80SAndy Whitcroft 	 */
2451788c7df4SHugh Dickins 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
24522b26736cSAndy Whitcroft 		if (vma_needs_reservation(h, vma, address) < 0) {
24532b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
24542b26736cSAndy Whitcroft 			goto backout_unlocked;
24552b26736cSAndy Whitcroft 		}
245657303d80SAndy Whitcroft 
2457ac9b9c66SHugh Dickins 	spin_lock(&mm->page_table_lock);
2458a5516438SAndi Kleen 	size = i_size_read(mapping->host) >> huge_page_shift(h);
24594c887265SAdam Litke 	if (idx >= size)
24604c887265SAdam Litke 		goto backout;
24614c887265SAdam Litke 
246283c54070SNick Piggin 	ret = 0;
24637f2e9525SGerald Schaefer 	if (!huge_pte_none(huge_ptep_get(ptep)))
24644c887265SAdam Litke 		goto backout;
24654c887265SAdam Litke 
24661e8f889bSDavid Gibson 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
24671e8f889bSDavid Gibson 				&& (vma->vm_flags & VM_SHARED)));
24681e8f889bSDavid Gibson 	set_huge_pte_at(mm, address, ptep, new_pte);
24691e8f889bSDavid Gibson 
2470788c7df4SHugh Dickins 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
24711e8f889bSDavid Gibson 		/* Optimization, do the COW without a second fault */
247204f2cbe3SMel Gorman 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
24731e8f889bSDavid Gibson 	}
24741e8f889bSDavid Gibson 
2475ac9b9c66SHugh Dickins 	spin_unlock(&mm->page_table_lock);
24764c887265SAdam Litke 	unlock_page(page);
24774c887265SAdam Litke out:
2478ac9b9c66SHugh Dickins 	return ret;
24794c887265SAdam Litke 
24804c887265SAdam Litke backout:
24814c887265SAdam Litke 	spin_unlock(&mm->page_table_lock);
24822b26736cSAndy Whitcroft backout_unlocked:
24834c887265SAdam Litke 	unlock_page(page);
24844c887265SAdam Litke 	put_page(page);
24854c887265SAdam Litke 	goto out;
2486ac9b9c66SHugh Dickins }
2487ac9b9c66SHugh Dickins 
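/*
 * Top-level hugetlb fault handler. The generic fault path is expected to
 * dispatch huge VMAs here; roughly (sketch only, not code from this file):
 *
 *	if (unlikely(is_vm_hugetlb_page(vma)))
 *		return hugetlb_fault(mm, vma, address, flags);
 *
 * A single hugetlb_instantiation_mutex serializes instantiation so that
 * CPUs racing on the same page do not both allocate and then spuriously
 * fail for lack of huge pages.
 */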
248886e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2489788c7df4SHugh Dickins 			unsigned long address, unsigned int flags)
249086e5216fSAdam Litke {
249186e5216fSAdam Litke 	pte_t *ptep;
249286e5216fSAdam Litke 	pte_t entry;
24931e8f889bSDavid Gibson 	int ret;
249457303d80SAndy Whitcroft 	struct page *pagecache_page = NULL;
24953935baa9SDavid Gibson 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2496a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
249786e5216fSAdam Litke 
2498a5516438SAndi Kleen 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
249986e5216fSAdam Litke 	if (!ptep)
250086e5216fSAdam Litke 		return VM_FAULT_OOM;
250186e5216fSAdam Litke 
25023935baa9SDavid Gibson 	/*
25033935baa9SDavid Gibson 	 * Serialize hugepage allocation and instantiation, so that we don't
25043935baa9SDavid Gibson 	 * get spurious allocation failures if two CPUs race to instantiate
25053935baa9SDavid Gibson 	 * the same page in the page cache.
25063935baa9SDavid Gibson 	 */
25073935baa9SDavid Gibson 	mutex_lock(&hugetlb_instantiation_mutex);
25087f2e9525SGerald Schaefer 	entry = huge_ptep_get(ptep);
25097f2e9525SGerald Schaefer 	if (huge_pte_none(entry)) {
2510788c7df4SHugh Dickins 		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2511b4d1d99fSDavid Gibson 		goto out_mutex;
25123935baa9SDavid Gibson 	}
251386e5216fSAdam Litke 
251483c54070SNick Piggin 	ret = 0;
25151e8f889bSDavid Gibson 
251657303d80SAndy Whitcroft 	/*
251757303d80SAndy Whitcroft 	 * If we are going to COW the mapping later, we examine the pending
251857303d80SAndy Whitcroft 	 * reservations for this page now. This will ensure that any
251957303d80SAndy Whitcroft 	 * allocations necessary to record that reservation occur outside the
252057303d80SAndy Whitcroft 	 * spinlock. For private mappings, we also lookup the pagecache
252157303d80SAndy Whitcroft 	 * page now as it is used to determine if a reservation has been
252257303d80SAndy Whitcroft 	 * consumed.
252357303d80SAndy Whitcroft 	 */
2524788c7df4SHugh Dickins 	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
25252b26736cSAndy Whitcroft 		if (vma_needs_reservation(h, vma, address) < 0) {
25262b26736cSAndy Whitcroft 			ret = VM_FAULT_OOM;
2527b4d1d99fSDavid Gibson 			goto out_mutex;
25282b26736cSAndy Whitcroft 		}
252957303d80SAndy Whitcroft 
2530f83a275dSMel Gorman 		if (!(vma->vm_flags & VM_MAYSHARE))
253157303d80SAndy Whitcroft 			pagecache_page = hugetlbfs_pagecache_page(h,
253257303d80SAndy Whitcroft 								vma, address);
253357303d80SAndy Whitcroft 	}
253457303d80SAndy Whitcroft 
25351e8f889bSDavid Gibson 	spin_lock(&mm->page_table_lock);
25361e8f889bSDavid Gibson 	/* Check for a racing update before calling hugetlb_cow */
2537b4d1d99fSDavid Gibson 	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2538b4d1d99fSDavid Gibson 		goto out_page_table_lock;
2539b4d1d99fSDavid Gibson 
2540b4d1d99fSDavid Gibson 
2541788c7df4SHugh Dickins 	if (flags & FAULT_FLAG_WRITE) {
2542b4d1d99fSDavid Gibson 		if (!pte_write(entry)) {
254357303d80SAndy Whitcroft 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
254457303d80SAndy Whitcroft 							pagecache_page);
2545b4d1d99fSDavid Gibson 			goto out_page_table_lock;
2546b4d1d99fSDavid Gibson 		}
2547b4d1d99fSDavid Gibson 		entry = pte_mkdirty(entry);
2548b4d1d99fSDavid Gibson 	}
2549b4d1d99fSDavid Gibson 	entry = pte_mkyoung(entry);
2550788c7df4SHugh Dickins 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2551788c7df4SHugh Dickins 						flags & FAULT_FLAG_WRITE))
2552b4d1d99fSDavid Gibson 		update_mmu_cache(vma, address, entry);
2553b4d1d99fSDavid Gibson 
2554b4d1d99fSDavid Gibson out_page_table_lock:
25551e8f889bSDavid Gibson 	spin_unlock(&mm->page_table_lock);
255657303d80SAndy Whitcroft 
255757303d80SAndy Whitcroft 	if (pagecache_page) {
255857303d80SAndy Whitcroft 		unlock_page(pagecache_page);
255957303d80SAndy Whitcroft 		put_page(pagecache_page);
256057303d80SAndy Whitcroft 	}
256157303d80SAndy Whitcroft 
2562b4d1d99fSDavid Gibson out_mutex:
25633935baa9SDavid Gibson 	mutex_unlock(&hugetlb_instantiation_mutex);
25641e8f889bSDavid Gibson 
25651e8f889bSDavid Gibson 	return ret;
256686e5216fSAdam Litke }
256786e5216fSAdam Litke 
2568ceb86879SAndi Kleen /* Can be overridden by architectures */
2569ceb86879SAndi Kleen __attribute__((weak)) struct page *
2570ceb86879SAndi Kleen follow_huge_pud(struct mm_struct *mm, unsigned long address,
2571ceb86879SAndi Kleen 	       pud_t *pud, int write)
2572ceb86879SAndi Kleen {
2573ceb86879SAndi Kleen 	BUG();
2574ceb86879SAndi Kleen 	return NULL;
2575ceb86879SAndi Kleen }
2576ceb86879SAndi Kleen 
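/*
 * get_user_pages() back end for hugetlb VMAs: walk the requested range
 * PAGE_SIZE at a time, faulting huge pages in as needed and handing back
 * the constituent subpages via mem_map_offset(). With FOLL_DUMP, a hole
 * with no backing pagecache is reported as an error so that core dumps
 * stay sparse instead of allocating huge pages just to zero-fill them.
 */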
257763551ae0SDavid Gibson int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
257863551ae0SDavid Gibson 			struct page **pages, struct vm_area_struct **vmas,
25795b23dbe8SAdam Litke 			unsigned long *position, int *length, int i,
25802a15efc9SHugh Dickins 			unsigned int flags)
258163551ae0SDavid Gibson {
2582d5d4b0aaSChen, Kenneth W 	unsigned long pfn_offset;
2583d5d4b0aaSChen, Kenneth W 	unsigned long vaddr = *position;
258463551ae0SDavid Gibson 	int remainder = *length;
2585a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
258663551ae0SDavid Gibson 
25871c59827dSHugh Dickins 	spin_lock(&mm->page_table_lock);
258863551ae0SDavid Gibson 	while (vaddr < vma->vm_end && remainder) {
258963551ae0SDavid Gibson 		pte_t *pte;
25902a15efc9SHugh Dickins 		int absent;
259163551ae0SDavid Gibson 		struct page *page;
259263551ae0SDavid Gibson 
25934c887265SAdam Litke 		/*
25944c887265SAdam Litke 		 * Some archs (sparc64, sh*) have multiple pte_t entries
25952a15efc9SHugh Dickins 		 * for each hugepage.  We have to make sure we get the
25964c887265SAdam Litke 		 * first, for the page indexing below to work.
25974c887265SAdam Litke 		 */
2598a5516438SAndi Kleen 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
25992a15efc9SHugh Dickins 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
260063551ae0SDavid Gibson 
26012a15efc9SHugh Dickins 		/*
26022a15efc9SHugh Dickins 		 * When coredumping, it suits get_dump_page if we just return
26033ae77f43SHugh Dickins 		 * an error where there's an empty slot with no huge pagecache
26043ae77f43SHugh Dickins 		 * to back it.  This way, we avoid allocating a hugepage, and
26053ae77f43SHugh Dickins 		 * the sparse dumpfile avoids allocating disk blocks, but its
26063ae77f43SHugh Dickins 		 * huge holes still show up with zeroes where they need to be.
26072a15efc9SHugh Dickins 		 */
26083ae77f43SHugh Dickins 		if (absent && (flags & FOLL_DUMP) &&
26093ae77f43SHugh Dickins 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
26102a15efc9SHugh Dickins 			remainder = 0;
26112a15efc9SHugh Dickins 			break;
26122a15efc9SHugh Dickins 		}
26132a15efc9SHugh Dickins 
26142a15efc9SHugh Dickins 		if (absent ||
26152a15efc9SHugh Dickins 		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
26164c887265SAdam Litke 			int ret;
26174c887265SAdam Litke 
26184c887265SAdam Litke 			spin_unlock(&mm->page_table_lock);
26192a15efc9SHugh Dickins 			ret = hugetlb_fault(mm, vma, vaddr,
26202a15efc9SHugh Dickins 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
26214c887265SAdam Litke 			spin_lock(&mm->page_table_lock);
2622a89182c7SAdam Litke 			if (!(ret & VM_FAULT_ERROR))
26234c887265SAdam Litke 				continue;
26244c887265SAdam Litke 
26251c59827dSHugh Dickins 			remainder = 0;
26261c59827dSHugh Dickins 			break;
26271c59827dSHugh Dickins 		}
262863551ae0SDavid Gibson 
2629a5516438SAndi Kleen 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
26307f2e9525SGerald Schaefer 		page = pte_page(huge_ptep_get(pte));
2631d5d4b0aaSChen, Kenneth W same_page:
2632d6692183SChen, Kenneth W 		if (pages) {
263369d177c2SAndy Whitcroft 			pages[i] = mem_map_offset(page, pfn_offset);
26344b2e38adSKOSAKI Motohiro 			get_page(pages[i]);
2635d6692183SChen, Kenneth W 		}
263663551ae0SDavid Gibson 
263763551ae0SDavid Gibson 		if (vmas)
263863551ae0SDavid Gibson 			vmas[i] = vma;
263963551ae0SDavid Gibson 
264063551ae0SDavid Gibson 		vaddr += PAGE_SIZE;
2641d5d4b0aaSChen, Kenneth W 		++pfn_offset;
264263551ae0SDavid Gibson 		--remainder;
264363551ae0SDavid Gibson 		++i;
2644d5d4b0aaSChen, Kenneth W 		if (vaddr < vma->vm_end && remainder &&
2645a5516438SAndi Kleen 				pfn_offset < pages_per_huge_page(h)) {
2646d5d4b0aaSChen, Kenneth W 			/*
2647d5d4b0aaSChen, Kenneth W 			 * We use pfn_offset to avoid touching the pageframes
2648d5d4b0aaSChen, Kenneth W 			 * of this compound page.
2649d5d4b0aaSChen, Kenneth W 			 */
2650d5d4b0aaSChen, Kenneth W 			goto same_page;
2651d5d4b0aaSChen, Kenneth W 		}
265263551ae0SDavid Gibson 	}
26531c59827dSHugh Dickins 	spin_unlock(&mm->page_table_lock);
265463551ae0SDavid Gibson 	*length = remainder;
265563551ae0SDavid Gibson 	*position = vaddr;
265663551ae0SDavid Gibson 
26572a15efc9SHugh Dickins 	return i ? i : -EFAULT;
265863551ae0SDavid Gibson }
26598f860591SZhang, Yanmin 
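/*
 * mprotect() support for hugetlb VMAs: rewrite every present huge PTE in
 * [address, end) with the new protection, skipping entries whose page
 * tables are shared (huge_pmd_unshare), then flush the TLB for the whole
 * range.
 */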
26608f860591SZhang, Yanmin void hugetlb_change_protection(struct vm_area_struct *vma,
26618f860591SZhang, Yanmin 		unsigned long address, unsigned long end, pgprot_t newprot)
26628f860591SZhang, Yanmin {
26638f860591SZhang, Yanmin 	struct mm_struct *mm = vma->vm_mm;
26648f860591SZhang, Yanmin 	unsigned long start = address;
26658f860591SZhang, Yanmin 	pte_t *ptep;
26668f860591SZhang, Yanmin 	pte_t pte;
2667a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
26688f860591SZhang, Yanmin 
26698f860591SZhang, Yanmin 	BUG_ON(address >= end);
26708f860591SZhang, Yanmin 	flush_cache_range(vma, address, end);
26718f860591SZhang, Yanmin 
267239dde65cSChen, Kenneth W 	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
26738f860591SZhang, Yanmin 	spin_lock(&mm->page_table_lock);
2674a5516438SAndi Kleen 	for (; address < end; address += huge_page_size(h)) {
26758f860591SZhang, Yanmin 		ptep = huge_pte_offset(mm, address);
26768f860591SZhang, Yanmin 		if (!ptep)
26778f860591SZhang, Yanmin 			continue;
267839dde65cSChen, Kenneth W 		if (huge_pmd_unshare(mm, &address, ptep))
267939dde65cSChen, Kenneth W 			continue;
26807f2e9525SGerald Schaefer 		if (!huge_pte_none(huge_ptep_get(ptep))) {
26818f860591SZhang, Yanmin 			pte = huge_ptep_get_and_clear(mm, address, ptep);
26828f860591SZhang, Yanmin 			pte = pte_mkhuge(pte_modify(pte, newprot));
26838f860591SZhang, Yanmin 			set_huge_pte_at(mm, address, ptep, pte);
26848f860591SZhang, Yanmin 		}
26858f860591SZhang, Yanmin 	}
26868f860591SZhang, Yanmin 	spin_unlock(&mm->page_table_lock);
268739dde65cSChen, Kenneth W 	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
26888f860591SZhang, Yanmin 
26898f860591SZhang, Yanmin 	flush_tlb_range(vma, start, end);
26908f860591SZhang, Yanmin }
26918f860591SZhang, Yanmin 
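/*
 * Reserve huge pages and filesystem quota for [from, to) at mmap() time.
 * Shared mappings charge only the parts of the file not already reserved,
 * via the region list on the inode; private mappings reserve the full
 * range in a per-VMA resv_map and become the HPAGE_RESV_OWNER. With
 * VM_NORESERVE nothing is reserved here and allocation is attempted at
 * fault time instead.
 */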
2692a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode,
2693a1e78772SMel Gorman 					long from, long to,
26945a6fe125SMel Gorman 					struct vm_area_struct *vma,
26955a6fe125SMel Gorman 					int acctflag)
2696e4e574b7SAdam Litke {
269717c9d12eSMel Gorman 	long ret, chg;
2698a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
2699e4e574b7SAdam Litke 
2700a1e78772SMel Gorman 	/*
270117c9d12eSMel Gorman 	 * Only apply hugepage reservation if asked. At fault time, an
270217c9d12eSMel Gorman 	 * attempt will be made for VM_NORESERVE mappings to allocate a page
270317c9d12eSMel Gorman 	 * and filesystem quota without using reserves.
270417c9d12eSMel Gorman 	 */
270517c9d12eSMel Gorman 	if (acctflag & VM_NORESERVE)
270617c9d12eSMel Gorman 		return 0;
270717c9d12eSMel Gorman 
270817c9d12eSMel Gorman 	/*
2709a1e78772SMel Gorman 	 * Shared mappings base their reservation on the number of pages that
2710a1e78772SMel Gorman 	 * are already allocated on behalf of the file. Private mappings need
2711a1e78772SMel Gorman 	 * to reserve the full area even if read-only as mprotect() may be
2712a1e78772SMel Gorman 	 * called to make the mapping read-write. Assume !vma is a shm mapping
2713a1e78772SMel Gorman 	 */
2714f83a275dSMel Gorman 	if (!vma || vma->vm_flags & VM_MAYSHARE)
2715e4e574b7SAdam Litke 		chg = region_chg(&inode->i_mapping->private_list, from, to);
27165a6fe125SMel Gorman 	else {
27175a6fe125SMel Gorman 		struct resv_map *resv_map = resv_map_alloc();
27185a6fe125SMel Gorman 		if (!resv_map)
27195a6fe125SMel Gorman 			return -ENOMEM;
27205a6fe125SMel Gorman 
272117c9d12eSMel Gorman 		chg = to - from;
272217c9d12eSMel Gorman 
27235a6fe125SMel Gorman 		set_vma_resv_map(vma, resv_map);
27245a6fe125SMel Gorman 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
27255a6fe125SMel Gorman 	}
27265a6fe125SMel Gorman 
272717c9d12eSMel Gorman 	if (chg < 0)
272817c9d12eSMel Gorman 		return chg;
272917c9d12eSMel Gorman 
273017c9d12eSMel Gorman 	/* There must be enough filesystem quota for the mapping */
273117c9d12eSMel Gorman 	if (hugetlb_get_quota(inode->i_mapping, chg))
273217c9d12eSMel Gorman 		return -ENOSPC;
273317c9d12eSMel Gorman 
273417c9d12eSMel Gorman 	/*
273517c9d12eSMel Gorman 	 * Check that enough hugepages are available for the reservation.
273617c9d12eSMel Gorman 	 * Hand back the quota if there are not.
273717c9d12eSMel Gorman 	 */
273817c9d12eSMel Gorman 	ret = hugetlb_acct_memory(h, chg);
273917c9d12eSMel Gorman 	if (ret < 0) {
274017c9d12eSMel Gorman 		hugetlb_put_quota(inode->i_mapping, chg);
274117c9d12eSMel Gorman 		return ret;
274217c9d12eSMel Gorman 	}
274317c9d12eSMel Gorman 
274417c9d12eSMel Gorman 	/*
274517c9d12eSMel Gorman 	 * Account for the reservations made. Shared mappings record regions
274617c9d12eSMel Gorman 	 * that have reservations as they are shared by multiple VMAs.
274717c9d12eSMel Gorman 	 * When the last VMA disappears, the region map says how much
274817c9d12eSMel Gorman 	 * the reservation was and the page cache tells how much of
274917c9d12eSMel Gorman 	 * the reservation was consumed. Private mappings are per-VMA and
275017c9d12eSMel Gorman 	 * only the consumed reservations are tracked. When the VMA
275117c9d12eSMel Gorman 	 * disappears, the original reservation is the VMA size and the
275217c9d12eSMel Gorman 	 * consumed reservations are stored in the map. Hence, nothing
275317c9d12eSMel Gorman 	 * else has to be done for private mappings here
275417c9d12eSMel Gorman 	 */
2755f83a275dSMel Gorman 	if (!vma || vma->vm_flags & VM_MAYSHARE)
275617c9d12eSMel Gorman 		region_add(&inode->i_mapping->private_list, from, to);
2757a43a8c39SChen, Kenneth W 	return 0;
2758a43a8c39SChen, Kenneth W }
2759a43a8c39SChen, Kenneth W 
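/*
 * Undo hugetlb_reserve_pages() when the file is truncated or released:
 * truncate the inode's region map at 'offset', give back the now unused
 * quota and reservation, and subtract the 'freed' pages that were actually
 * in the page cache from inode->i_blocks.
 */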
2760a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2761a43a8c39SChen, Kenneth W {
2762a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
2763a43a8c39SChen, Kenneth W 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
276445c682a6SKen Chen 
276545c682a6SKen Chen 	spin_lock(&inode->i_lock);
2766e4c6f8beSEric Sandeen 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
276745c682a6SKen Chen 	spin_unlock(&inode->i_lock);
276845c682a6SKen Chen 
276990d8b7e6SAdam Litke 	hugetlb_put_quota(inode->i_mapping, (chg - freed));
2770a5516438SAndi Kleen 	hugetlb_acct_memory(h, -(chg - freed));
2771a43a8c39SChen, Kenneth W }
2772