/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <linux/io.h>

#include <linux/hugetlb.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
 * must either hold the mmap_sem for write, or the mmap_sem for read and
 * the hugetlb_instantiation_mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}
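
/*
 * Worked example of the region API above (offsets are in huge-page
 * units, not bytes; the numbers are illustrative only):
 *
 *	Starting from an empty map, region_chg(head, 2, 5) finds no
 *	overlap, records a zero-sized placeholder [2,2) and returns 3,
 *	the number of pages to charge.  Once the charge succeeds,
 *	region_add(head, 2, 5) extends the placeholder to [2,5).  A
 *	later region_chg(head, 4, 7) overlaps [2,5) and returns 2,
 *	since only pages 5 and 6 need new reservations.  Finally,
 *	region_truncate(head, 3) trims the map back to [2,3) and
 *	returns 2, the number of reserved pages dropped.
 */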

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		int seg_from;
		int seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif
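
/*
 * Example of the vma_hugecache_offset() arithmetic above, assuming a
 * 2MB hstate (huge_page_shift() == 21, huge_page_order() == 9): for a
 * VMA with vm_start == 0x40000000 and vm_pgoff == 512 (a 2MB offset in
 * PAGE_SIZE units), address 0x40400000 yields
 * ((0x40400000 - 0x40000000) >> 21) + (512 >> 9) == 2 + 1 == 3,
 * i.e. huge-page index 3 in the backing file.
 */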

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * a reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}
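
/*
 * Sketch of the vm_private_data encoding used by the helpers below for
 * a MAP_PRIVATE hugetlb VMA.  A kmalloc'd resv_map is at least
 * word-aligned, so the bottom two bits of the pointer are free to
 * carry the flags defined above:
 *
 *	63                                     2  1  0
 *	+--------------------------------------+--+--+
 *	|        struct resv_map pointer       | U| O|
 *	+--------------------------------------+--+--+
 *
 *	O == HPAGE_RESV_OWNER, U == HPAGE_RESV_UNMAPPED
 */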

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_MAYSHARE) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void copy_gigantic_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (list_empty(&h->hugepage_freelists[nid]))
		return NULL;
	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
	list_del(&page->lru);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}
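
/*
 * Invariants maintained by enqueue_huge_page()/dequeue_huge_page_node()
 * above, all under hugetlb_lock: h->free_huge_pages is the sum of
 * h->free_huge_pages_node[] over all nodes, and a page sitting on a
 * hugepage_freelists[] list has a zero refcount until dequeue hands it
 * out again via set_page_refcounted().
 */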

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	get_mems_allowed();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (!avoid_reserve)
					decrement_hugepage_resv_vma(h, vma);
				break;
			}
		}
	}
err:
	mpol_cond_put(mpol);
	put_mems_allowed();
	return page;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}
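
/*
 * Note on PageHuge() below: hugetlb pages are identified by their
 * compound destructor rather than by a page flag.  prep_new_huge_page()
 * installs free_huge_page as the compound dtor, so comparing the head
 * page's dtor against free_huge_page distinguishes hugetlb pages from
 * other compound pages (e.g. transparent huge pages).
 */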

int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	next_nid = start_nid;

	do {
		page = alloc_fresh_huge_page_node(h, next_nid);
		if (page) {
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	} while (next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}
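
/*
 * Example interleave produced by the next-node helpers above (an
 * illustrative trace): with nodes_allowed == {0,2,3} and
 * next_nid_to_alloc initially 0, successive hstate_next_node_to_alloc()
 * calls return 0, 2, 3, 0, 2, 3, ... -- disallowed node 1 is skipped
 * and the walk wraps at the end of the mask, spreading persistent pool
 * pages round-robin across the allowed nodes.
 */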

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
						 bool acct_surplus)
{
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
		    !list_empty(&h->hugepage_freelists[next_nid])) {
			struct page *page =
				list_entry(h->hugepage_freelists[next_nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[next_nid]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[next_nid]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_free(h, nodes_allowed);
	} while (next_nid != start_nid);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit.
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A.  B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus().  A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again).  Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page.  This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use.  It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
				   __GFP_REPEAT|__GFP_NOWARN,
				   huge_page_order(h));
	else
		page = alloc_pages_exact_node(nid,
			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		page = NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}
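
/*
 * Illustrative numbers for the overcommit check in
 * alloc_buddy_huge_page() above: with nr_overcommit_huge_pages == 4
 * and surplus_huge_pages == 4, the optimistic increment is refused and
 * NULL is returned without touching the buddy allocator.  With
 * surplus_huge_pages == 3, both global counters are bumped before the
 * allocation; if the allocation then fails, both are decremented again
 * under hugetlb_lock.
 */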

/*
 * This allocation function is useful in contexts where the vma is
 * irrelevant.  E.g. soft-offlining uses this function because it only
 * cares about the physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			put_page(page);
		}
	}
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
			break;
	}
}
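
/*
 * Numeric sketch of the surplus grow/shrink pair above: with
 * resv_huge_pages == 10, free_huge_pages == 8 and delta == 3,
 * gather_surplus_pages() computes needed == (10 + 3) - 8 == 5 and asks
 * the buddy allocator for five surplus pages before committing the
 * reservation.  If the reservation is later released unused,
 * return_unused_surplus_pages() uncommits it and returns at most
 * min(unused, surplus_huge_pages) pages via free_pool_huge_page().
 */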

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * the reservation and actually increase quota before an allocation can
 * occur.  Where any new reservation would be required the reservation
 * change is prepared, but not committed.  Once the page has been
 * quota'd, allocated and instantiated, the change should be committed
 * via vma_commit_reservation.  No action is required on failure.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}

static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}
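
/*
 * The two helpers above form a prepare/commit pair; a typical caller
 * (alloc_huge_page() below) looks roughly like:
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-VM_FAULT_OOM);
 *	if (chg && hugetlb_get_quota(mapping, chg))
 *		return ERR_PTR(-VM_FAULT_SIGBUS);
 *	page = ...allocate...;
 *	vma_commit_reservation(h, vma, addr);
 *
 * so the region map is only marked used once both quota and a page
 * have been secured.
 */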

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	long chg;

	/*
	 * Processes that did not create the mapping will have no reserves
	 * and will not have accounted against quota. Check that the quota
	 * can be made before satisfying the allocation.  MAP_NORESERVE
	 * mappings may also need pages and quota allocated if no reserve
	 * mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(-VM_FAULT_OOM);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_SIGBUS);
		}
	}

	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(h, vma, addr);

	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(hstate_next_node_to_alloc(h,
						&node_states[N_HIGH_MEMORY])),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct hstate *h = m->hstate;
		struct page *page;

#ifdef CONFIG_HIGHMEM
		page = pfn_to_page(m->phys >> PAGE_SHIFT);
		free_bootmem_late((unsigned long)m,
				  sizeof(struct huge_bootmem_page));
#else
		page = virt_to_page(m);
#endif
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other
		 * side-effects, like CommitLimit going negative.
1132b0320c7bSRafael Aquini */ 1133b0320c7bSRafael Aquini if (h->order > (MAX_ORDER - 1)) 1134b0320c7bSRafael Aquini totalram_pages += 1 << h->order; 1135aa888a74SAndi Kleen } 1136aa888a74SAndi Kleen } 1137aa888a74SAndi Kleen 11388faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 11391da177e4SLinus Torvalds { 11401da177e4SLinus Torvalds unsigned long i; 11411da177e4SLinus Torvalds 1142e5ff2159SAndi Kleen for (i = 0; i < h->max_huge_pages; ++i) { 1143aa888a74SAndi Kleen if (h->order >= MAX_ORDER) { 1144aa888a74SAndi Kleen if (!alloc_bootmem_huge_page(h)) 1145aa888a74SAndi Kleen break; 11469b5e5d0fSLee Schermerhorn } else if (!alloc_fresh_huge_page(h, 11479b5e5d0fSLee Schermerhorn &node_states[N_HIGH_MEMORY])) 11481da177e4SLinus Torvalds break; 11491da177e4SLinus Torvalds } 11508faa8b07SAndi Kleen h->max_huge_pages = i; 1151e5ff2159SAndi Kleen } 1152e5ff2159SAndi Kleen 1153e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void) 1154e5ff2159SAndi Kleen { 1155e5ff2159SAndi Kleen struct hstate *h; 1156e5ff2159SAndi Kleen 1157e5ff2159SAndi Kleen for_each_hstate(h) { 11588faa8b07SAndi Kleen /* oversize hugepages were init'ed in early boot */ 11598faa8b07SAndi Kleen if (h->order < MAX_ORDER) 11608faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(h); 1161e5ff2159SAndi Kleen } 1162e5ff2159SAndi Kleen } 1163e5ff2159SAndi Kleen 11644abd32dbSAndi Kleen static char * __init memfmt(char *buf, unsigned long n) 11654abd32dbSAndi Kleen { 11664abd32dbSAndi Kleen if (n >= (1UL << 30)) 11674abd32dbSAndi Kleen sprintf(buf, "%lu GB", n >> 30); 11684abd32dbSAndi Kleen else if (n >= (1UL << 20)) 11694abd32dbSAndi Kleen sprintf(buf, "%lu MB", n >> 20); 11704abd32dbSAndi Kleen else 11714abd32dbSAndi Kleen sprintf(buf, "%lu KB", n >> 10); 11724abd32dbSAndi Kleen return buf; 11734abd32dbSAndi Kleen } 11744abd32dbSAndi Kleen 1175e5ff2159SAndi Kleen static void __init report_hugepages(void) 1176e5ff2159SAndi Kleen { 1177e5ff2159SAndi Kleen struct hstate *h; 1178e5ff2159SAndi Kleen 1179e5ff2159SAndi Kleen for_each_hstate(h) { 11804abd32dbSAndi Kleen char buf[32]; 11814abd32dbSAndi Kleen printk(KERN_INFO "HugeTLB registered %s page size, " 11824abd32dbSAndi Kleen "pre-allocated %ld pages\n", 11834abd32dbSAndi Kleen memfmt(buf, huge_page_size(h)), 11844abd32dbSAndi Kleen h->free_huge_pages); 1185e5ff2159SAndi Kleen } 1186e5ff2159SAndi Kleen } 1187e5ff2159SAndi Kleen 11881da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM 11896ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count, 11906ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 11911da177e4SLinus Torvalds { 11924415cc8dSChristoph Lameter int i; 11934415cc8dSChristoph Lameter 1194aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1195aa888a74SAndi Kleen return; 1196aa888a74SAndi Kleen 11976ae11b27SLee Schermerhorn for_each_node_mask(i, *nodes_allowed) { 11981da177e4SLinus Torvalds struct page *page, *next; 1199a5516438SAndi Kleen struct list_head *freel = &h->hugepage_freelists[i]; 1200a5516438SAndi Kleen list_for_each_entry_safe(page, next, freel, lru) { 1201a5516438SAndi Kleen if (count >= h->nr_huge_pages) 12026b0c880dSAdam Litke return; 12031da177e4SLinus Torvalds if (PageHighMem(page)) 12041da177e4SLinus Torvalds continue; 12051da177e4SLinus Torvalds list_del(&page->lru); 1206e5ff2159SAndi Kleen update_and_free_page(h, page); 1207a5516438SAndi Kleen h->free_huge_pages--; 1208a5516438SAndi Kleen h->free_huge_pages_node[page_to_nid(page)]--; 12091da177e4SLinus Torvalds } 12101da177e4SLinus Torvalds 
} 12111da177e4SLinus Torvalds } 12121da177e4SLinus Torvalds #else 12136ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count, 12146ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 12151da177e4SLinus Torvalds { 12161da177e4SLinus Torvalds } 12171da177e4SLinus Torvalds #endif 12181da177e4SLinus Torvalds 121920a0307cSWu Fengguang /* 122020a0307cSWu Fengguang * Increment or decrement surplus_huge_pages. Keep node-specific counters 122120a0307cSWu Fengguang * balanced by operating on them in a round-robin fashion. 122220a0307cSWu Fengguang * Returns 1 if an adjustment was made. 122320a0307cSWu Fengguang */ 12246ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 12256ae11b27SLee Schermerhorn int delta) 122620a0307cSWu Fengguang { 1227e8c5c824SLee Schermerhorn int start_nid, next_nid; 122820a0307cSWu Fengguang int ret = 0; 122920a0307cSWu Fengguang 123020a0307cSWu Fengguang VM_BUG_ON(delta != -1 && delta != 1); 123120a0307cSWu Fengguang 1232e8c5c824SLee Schermerhorn if (delta < 0) 12336ae11b27SLee Schermerhorn start_nid = hstate_next_node_to_alloc(h, nodes_allowed); 1234e8c5c824SLee Schermerhorn else 12356ae11b27SLee Schermerhorn start_nid = hstate_next_node_to_free(h, nodes_allowed); 1236e8c5c824SLee Schermerhorn next_nid = start_nid; 1237e8c5c824SLee Schermerhorn 1238e8c5c824SLee Schermerhorn do { 1239e8c5c824SLee Schermerhorn int nid = next_nid; 1240e8c5c824SLee Schermerhorn if (delta < 0) { 1241e8c5c824SLee Schermerhorn /* 1242e8c5c824SLee Schermerhorn * To shrink on this node, there must be a surplus page 1243e8c5c824SLee Schermerhorn */ 12449a76db09SLee Schermerhorn if (!h->surplus_huge_pages_node[nid]) { 12456ae11b27SLee Schermerhorn next_nid = hstate_next_node_to_alloc(h, 12466ae11b27SLee Schermerhorn nodes_allowed); 124720a0307cSWu Fengguang continue; 1248e8c5c824SLee Schermerhorn } 12499a76db09SLee Schermerhorn } 1250e8c5c824SLee Schermerhorn if (delta > 0) { 1251e8c5c824SLee Schermerhorn /* 1252e8c5c824SLee Schermerhorn * Surplus cannot exceed the total number of pages 1253e8c5c824SLee Schermerhorn */ 1254e8c5c824SLee Schermerhorn if (h->surplus_huge_pages_node[nid] >= 12559a76db09SLee Schermerhorn h->nr_huge_pages_node[nid]) { 12566ae11b27SLee Schermerhorn next_nid = hstate_next_node_to_free(h, 12576ae11b27SLee Schermerhorn nodes_allowed); 125820a0307cSWu Fengguang continue; 1259e8c5c824SLee Schermerhorn } 12609a76db09SLee Schermerhorn } 126120a0307cSWu Fengguang 126220a0307cSWu Fengguang h->surplus_huge_pages += delta; 126320a0307cSWu Fengguang h->surplus_huge_pages_node[nid] += delta; 126420a0307cSWu Fengguang ret = 1; 126520a0307cSWu Fengguang break; 1266e8c5c824SLee Schermerhorn } while (next_nid != start_nid); 126720a0307cSWu Fengguang 126820a0307cSWu Fengguang return ret; 126920a0307cSWu Fengguang } 127020a0307cSWu Fengguang 1271a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 12726ae11b27SLee Schermerhorn static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, 12736ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 12741da177e4SLinus Torvalds { 12757893d1d5SAdam Litke unsigned long min_count, ret; 12761da177e4SLinus Torvalds 1277aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1278aa888a74SAndi Kleen return h->max_huge_pages; 1279aa888a74SAndi Kleen 12807893d1d5SAdam Litke /* 12817893d1d5SAdam Litke * Increase the pool size 12827893d1d5SAdam Litke * First take pages out of surplus state. 
Then make up the
12837893d1d5SAdam Litke * remaining difference by allocating fresh huge pages.
1284d1c3fb1fSNishanth Aravamudan *
1285d1c3fb1fSNishanth Aravamudan * We might race with alloc_buddy_huge_page() here and be unable
1286d1c3fb1fSNishanth Aravamudan * to convert a surplus huge page to a normal huge page. That is
1287d1c3fb1fSNishanth Aravamudan * not critical, though: it just means the overall size of the
1288d1c3fb1fSNishanth Aravamudan * pool might be one hugepage larger than it needs to be, but
1289d1c3fb1fSNishanth Aravamudan * within all the constraints specified by the sysctls.
12907893d1d5SAdam Litke */
12911da177e4SLinus Torvalds spin_lock(&hugetlb_lock);
1292a5516438SAndi Kleen while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
12936ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, -1))
12947893d1d5SAdam Litke break;
12957893d1d5SAdam Litke }
12967893d1d5SAdam Litke 
1297a5516438SAndi Kleen while (count > persistent_huge_pages(h)) {
12987893d1d5SAdam Litke /*
12997893d1d5SAdam Litke * If this allocation races such that we no longer need the
13007893d1d5SAdam Litke * page, free_huge_page will handle it by freeing the page
13017893d1d5SAdam Litke * and reducing the surplus.
13027893d1d5SAdam Litke */
13037893d1d5SAdam Litke spin_unlock(&hugetlb_lock);
13046ae11b27SLee Schermerhorn ret = alloc_fresh_huge_page(h, nodes_allowed);
13057893d1d5SAdam Litke spin_lock(&hugetlb_lock);
13067893d1d5SAdam Litke if (!ret)
13077893d1d5SAdam Litke goto out;
13087893d1d5SAdam Litke 
1309536240f2SMel Gorman /* Bail for signals. Probably ctrl-c from user */
1310536240f2SMel Gorman if (signal_pending(current))
1311536240f2SMel Gorman goto out;
13127893d1d5SAdam Litke }
13137893d1d5SAdam Litke 
13147893d1d5SAdam Litke /*
13157893d1d5SAdam Litke * Decrease the pool size
13167893d1d5SAdam Litke * First return free pages to the buddy allocator (being careful
13177893d1d5SAdam Litke * to keep enough around to satisfy reservations). Then place
13187893d1d5SAdam Litke * pages into surplus state as needed so the pool will shrink
13197893d1d5SAdam Litke * to the desired size as pages become free.
1320d1c3fb1fSNishanth Aravamudan *
1321d1c3fb1fSNishanth Aravamudan * By placing pages into the surplus state independent of the
1322d1c3fb1fSNishanth Aravamudan * overcommit value, we are allowing the surplus pool size to
1323d1c3fb1fSNishanth Aravamudan * exceed overcommit. There are few sane options here. Since
1324d1c3fb1fSNishanth Aravamudan * alloc_buddy_huge_page() is checking the global counter,
1325d1c3fb1fSNishanth Aravamudan * though, we'll note that we're not allowed to exceed surplus
1326d1c3fb1fSNishanth Aravamudan * and won't grow the pool anywhere else. Not until one of the
1327d1c3fb1fSNishanth Aravamudan * sysctls is changed, or the surplus pages go out of use.
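 *
 * Worked example with illustrative numbers: if nr_huge_pages is 10,
 * free_huge_pages is 4 and resv_huge_pages is 2, then at least
 * 2 + 10 - 4 = 8 pages are in use or reserved, so a request to
 * shrink to count = 5 is clamped to min_count = max(5, 8) = 8
 * until reservations are released.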
13287893d1d5SAdam Litke */ 1329a5516438SAndi Kleen min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 13306b0c880dSAdam Litke min_count = max(count, min_count); 13316ae11b27SLee Schermerhorn try_to_free_low(h, min_count, nodes_allowed); 1332a5516438SAndi Kleen while (min_count < persistent_huge_pages(h)) { 13336ae11b27SLee Schermerhorn if (!free_pool_huge_page(h, nodes_allowed, 0)) 13341da177e4SLinus Torvalds break; 13351da177e4SLinus Torvalds } 1336a5516438SAndi Kleen while (count < persistent_huge_pages(h)) { 13376ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, 1)) 13387893d1d5SAdam Litke break; 13397893d1d5SAdam Litke } 13407893d1d5SAdam Litke out: 1341a5516438SAndi Kleen ret = persistent_huge_pages(h); 13421da177e4SLinus Torvalds spin_unlock(&hugetlb_lock); 13437893d1d5SAdam Litke return ret; 13441da177e4SLinus Torvalds } 13451da177e4SLinus Torvalds 1346a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \ 1347a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 1348a3437870SNishanth Aravamudan 1349a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \ 1350a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = \ 1351a3437870SNishanth Aravamudan __ATTR(_name, 0644, _name##_show, _name##_store) 1352a3437870SNishanth Aravamudan 1353a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj; 1354a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 1355a3437870SNishanth Aravamudan 13569a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 13579a305230SLee Schermerhorn 13589a305230SLee Schermerhorn static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 1359a3437870SNishanth Aravamudan { 1360a3437870SNishanth Aravamudan int i; 13619a305230SLee Schermerhorn 1362a3437870SNishanth Aravamudan for (i = 0; i < HUGE_MAX_HSTATE; i++) 13639a305230SLee Schermerhorn if (hstate_kobjs[i] == kobj) { 13649a305230SLee Schermerhorn if (nidp) 13659a305230SLee Schermerhorn *nidp = NUMA_NO_NODE; 1366a3437870SNishanth Aravamudan return &hstates[i]; 13679a305230SLee Schermerhorn } 13689a305230SLee Schermerhorn 13699a305230SLee Schermerhorn return kobj_to_node_hstate(kobj, nidp); 1370a3437870SNishanth Aravamudan } 1371a3437870SNishanth Aravamudan 137206808b08SLee Schermerhorn static ssize_t nr_hugepages_show_common(struct kobject *kobj, 1373a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1374a3437870SNishanth Aravamudan { 13759a305230SLee Schermerhorn struct hstate *h; 13769a305230SLee Schermerhorn unsigned long nr_huge_pages; 13779a305230SLee Schermerhorn int nid; 13789a305230SLee Schermerhorn 13799a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 13809a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 13819a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages; 13829a305230SLee Schermerhorn else 13839a305230SLee Schermerhorn nr_huge_pages = h->nr_huge_pages_node[nid]; 13849a305230SLee Schermerhorn 13859a305230SLee Schermerhorn return sprintf(buf, "%lu\n", nr_huge_pages); 1386a3437870SNishanth Aravamudan } 1387adbe8726SEric B Munson 138806808b08SLee Schermerhorn static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 138906808b08SLee Schermerhorn struct kobject *kobj, struct kobj_attribute *attr, 139006808b08SLee Schermerhorn const char *buf, size_t len) 1391a3437870SNishanth Aravamudan { 1392a3437870SNishanth Aravamudan int err; 13939a305230SLee Schermerhorn int nid; 
139406808b08SLee Schermerhorn unsigned long count; 13959a305230SLee Schermerhorn struct hstate *h; 1396bad44b5bSDavid Rientjes NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY); 1397a3437870SNishanth Aravamudan 139806808b08SLee Schermerhorn err = strict_strtoul(buf, 10, &count); 139973ae31e5SEric B Munson if (err) 1400adbe8726SEric B Munson goto out; 1401a3437870SNishanth Aravamudan 14029a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 1403adbe8726SEric B Munson if (h->order >= MAX_ORDER) { 1404adbe8726SEric B Munson err = -EINVAL; 1405adbe8726SEric B Munson goto out; 1406adbe8726SEric B Munson } 1407adbe8726SEric B Munson 14089a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) { 14099a305230SLee Schermerhorn /* 14109a305230SLee Schermerhorn * global hstate attribute 14119a305230SLee Schermerhorn */ 14129a305230SLee Schermerhorn if (!(obey_mempolicy && 14139a305230SLee Schermerhorn init_nodemask_of_mempolicy(nodes_allowed))) { 141406808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 14159a305230SLee Schermerhorn nodes_allowed = &node_states[N_HIGH_MEMORY]; 141606808b08SLee Schermerhorn } 14179a305230SLee Schermerhorn } else if (nodes_allowed) { 14189a305230SLee Schermerhorn /* 14199a305230SLee Schermerhorn * per node hstate attribute: adjust count to global, 14209a305230SLee Schermerhorn * but restrict alloc/free to the specified node. 14219a305230SLee Schermerhorn */ 14229a305230SLee Schermerhorn count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 14239a305230SLee Schermerhorn init_nodemask_of_node(nodes_allowed, nid); 14249a305230SLee Schermerhorn } else 14259a305230SLee Schermerhorn nodes_allowed = &node_states[N_HIGH_MEMORY]; 14269a305230SLee Schermerhorn 142706808b08SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed); 1428a3437870SNishanth Aravamudan 14299b5e5d0fSLee Schermerhorn if (nodes_allowed != &node_states[N_HIGH_MEMORY]) 143006808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 143106808b08SLee Schermerhorn 143206808b08SLee Schermerhorn return len; 1433adbe8726SEric B Munson out: 1434adbe8726SEric B Munson NODEMASK_FREE(nodes_allowed); 1435adbe8726SEric B Munson return err; 143606808b08SLee Schermerhorn } 143706808b08SLee Schermerhorn 143806808b08SLee Schermerhorn static ssize_t nr_hugepages_show(struct kobject *kobj, 143906808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf) 144006808b08SLee Schermerhorn { 144106808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf); 144206808b08SLee Schermerhorn } 144306808b08SLee Schermerhorn 144406808b08SLee Schermerhorn static ssize_t nr_hugepages_store(struct kobject *kobj, 144506808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len) 144606808b08SLee Schermerhorn { 144706808b08SLee Schermerhorn return nr_hugepages_store_common(false, kobj, attr, buf, len); 1448a3437870SNishanth Aravamudan } 1449a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages); 1450a3437870SNishanth Aravamudan 145106808b08SLee Schermerhorn #ifdef CONFIG_NUMA 145206808b08SLee Schermerhorn 145306808b08SLee Schermerhorn /* 145406808b08SLee Schermerhorn * hstate attribute for optionally mempolicy-based constraint on persistent 145506808b08SLee Schermerhorn * huge page alloc/free. 
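 *
 * Illustrative shell usage, assuming a 2 MB hstate and the sysfs
 * layout created by hugetlb_sysfs_init() above; the nodes the pages
 * land on follow the mempolicy of the task doing the write:
 *
 *	numactl -m 1 sh -c 'echo 64 > \
 *		/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'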
145606808b08SLee Schermerhorn */ 145706808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 145806808b08SLee Schermerhorn struct kobj_attribute *attr, char *buf) 145906808b08SLee Schermerhorn { 146006808b08SLee Schermerhorn return nr_hugepages_show_common(kobj, attr, buf); 146106808b08SLee Schermerhorn } 146206808b08SLee Schermerhorn 146306808b08SLee Schermerhorn static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 146406808b08SLee Schermerhorn struct kobj_attribute *attr, const char *buf, size_t len) 146506808b08SLee Schermerhorn { 146606808b08SLee Schermerhorn return nr_hugepages_store_common(true, kobj, attr, buf, len); 146706808b08SLee Schermerhorn } 146806808b08SLee Schermerhorn HSTATE_ATTR(nr_hugepages_mempolicy); 146906808b08SLee Schermerhorn #endif 147006808b08SLee Schermerhorn 147106808b08SLee Schermerhorn 1472a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 1473a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1474a3437870SNishanth Aravamudan { 14759a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1476a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 1477a3437870SNishanth Aravamudan } 1478adbe8726SEric B Munson 1479a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 1480a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1481a3437870SNishanth Aravamudan { 1482a3437870SNishanth Aravamudan int err; 1483a3437870SNishanth Aravamudan unsigned long input; 14849a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1485a3437870SNishanth Aravamudan 1486adbe8726SEric B Munson if (h->order >= MAX_ORDER) 1487adbe8726SEric B Munson return -EINVAL; 1488adbe8726SEric B Munson 1489a3437870SNishanth Aravamudan err = strict_strtoul(buf, 10, &input); 1490a3437870SNishanth Aravamudan if (err) 149173ae31e5SEric B Munson return err; 1492a3437870SNishanth Aravamudan 1493a3437870SNishanth Aravamudan spin_lock(&hugetlb_lock); 1494a3437870SNishanth Aravamudan h->nr_overcommit_huge_pages = input; 1495a3437870SNishanth Aravamudan spin_unlock(&hugetlb_lock); 1496a3437870SNishanth Aravamudan 1497a3437870SNishanth Aravamudan return count; 1498a3437870SNishanth Aravamudan } 1499a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages); 1500a3437870SNishanth Aravamudan 1501a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj, 1502a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1503a3437870SNishanth Aravamudan { 15049a305230SLee Schermerhorn struct hstate *h; 15059a305230SLee Schermerhorn unsigned long free_huge_pages; 15069a305230SLee Schermerhorn int nid; 15079a305230SLee Schermerhorn 15089a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 15099a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 15109a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages; 15119a305230SLee Schermerhorn else 15129a305230SLee Schermerhorn free_huge_pages = h->free_huge_pages_node[nid]; 15139a305230SLee Schermerhorn 15149a305230SLee Schermerhorn return sprintf(buf, "%lu\n", free_huge_pages); 1515a3437870SNishanth Aravamudan } 1516a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages); 1517a3437870SNishanth Aravamudan 1518a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj, 1519a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 
1520a3437870SNishanth Aravamudan { 15219a305230SLee Schermerhorn struct hstate *h = kobj_to_hstate(kobj, NULL); 1522a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->resv_huge_pages); 1523a3437870SNishanth Aravamudan } 1524a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages); 1525a3437870SNishanth Aravamudan 1526a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj, 1527a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1528a3437870SNishanth Aravamudan { 15299a305230SLee Schermerhorn struct hstate *h; 15309a305230SLee Schermerhorn unsigned long surplus_huge_pages; 15319a305230SLee Schermerhorn int nid; 15329a305230SLee Schermerhorn 15339a305230SLee Schermerhorn h = kobj_to_hstate(kobj, &nid); 15349a305230SLee Schermerhorn if (nid == NUMA_NO_NODE) 15359a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages; 15369a305230SLee Schermerhorn else 15379a305230SLee Schermerhorn surplus_huge_pages = h->surplus_huge_pages_node[nid]; 15389a305230SLee Schermerhorn 15399a305230SLee Schermerhorn return sprintf(buf, "%lu\n", surplus_huge_pages); 1540a3437870SNishanth Aravamudan } 1541a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages); 1542a3437870SNishanth Aravamudan 1543a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = { 1544a3437870SNishanth Aravamudan &nr_hugepages_attr.attr, 1545a3437870SNishanth Aravamudan &nr_overcommit_hugepages_attr.attr, 1546a3437870SNishanth Aravamudan &free_hugepages_attr.attr, 1547a3437870SNishanth Aravamudan &resv_hugepages_attr.attr, 1548a3437870SNishanth Aravamudan &surplus_hugepages_attr.attr, 154906808b08SLee Schermerhorn #ifdef CONFIG_NUMA 155006808b08SLee Schermerhorn &nr_hugepages_mempolicy_attr.attr, 155106808b08SLee Schermerhorn #endif 1552a3437870SNishanth Aravamudan NULL, 1553a3437870SNishanth Aravamudan }; 1554a3437870SNishanth Aravamudan 1555a3437870SNishanth Aravamudan static struct attribute_group hstate_attr_group = { 1556a3437870SNishanth Aravamudan .attrs = hstate_attrs, 1557a3437870SNishanth Aravamudan }; 1558a3437870SNishanth Aravamudan 1559094e9539SJeff Mahoney static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 15609a305230SLee Schermerhorn struct kobject **hstate_kobjs, 15619a305230SLee Schermerhorn struct attribute_group *hstate_attr_group) 1562a3437870SNishanth Aravamudan { 1563a3437870SNishanth Aravamudan int retval; 15649a305230SLee Schermerhorn int hi = h - hstates; 1565a3437870SNishanth Aravamudan 15669a305230SLee Schermerhorn hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 15679a305230SLee Schermerhorn if (!hstate_kobjs[hi]) 1568a3437870SNishanth Aravamudan return -ENOMEM; 1569a3437870SNishanth Aravamudan 15709a305230SLee Schermerhorn retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 1571a3437870SNishanth Aravamudan if (retval) 15729a305230SLee Schermerhorn kobject_put(hstate_kobjs[hi]); 1573a3437870SNishanth Aravamudan 1574a3437870SNishanth Aravamudan return retval; 1575a3437870SNishanth Aravamudan } 1576a3437870SNishanth Aravamudan 1577a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void) 1578a3437870SNishanth Aravamudan { 1579a3437870SNishanth Aravamudan struct hstate *h; 1580a3437870SNishanth Aravamudan int err; 1581a3437870SNishanth Aravamudan 1582a3437870SNishanth Aravamudan hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 1583a3437870SNishanth Aravamudan if (!hugepages_kobj) 1584a3437870SNishanth Aravamudan return; 1585a3437870SNishanth 
Aravamudan 1586a3437870SNishanth Aravamudan for_each_hstate(h) { 15879a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 15889a305230SLee Schermerhorn hstate_kobjs, &hstate_attr_group); 1589a3437870SNishanth Aravamudan if (err) 1590a3437870SNishanth Aravamudan printk(KERN_ERR "Hugetlb: Unable to add hstate %s", 1591a3437870SNishanth Aravamudan h->name); 1592a3437870SNishanth Aravamudan } 1593a3437870SNishanth Aravamudan } 1594a3437870SNishanth Aravamudan 15959a305230SLee Schermerhorn #ifdef CONFIG_NUMA 15969a305230SLee Schermerhorn 15979a305230SLee Schermerhorn /* 15989a305230SLee Schermerhorn * node_hstate/s - associate per node hstate attributes, via their kobjects, 159910fbcf4cSKay Sievers * with node devices in node_devices[] using a parallel array. The array 160010fbcf4cSKay Sievers * index of a node device or _hstate == node id. 160110fbcf4cSKay Sievers * This is here to avoid any static dependency of the node device driver, in 16029a305230SLee Schermerhorn * the base kernel, on the hugetlb module. 16039a305230SLee Schermerhorn */ 16049a305230SLee Schermerhorn struct node_hstate { 16059a305230SLee Schermerhorn struct kobject *hugepages_kobj; 16069a305230SLee Schermerhorn struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 16079a305230SLee Schermerhorn }; 16089a305230SLee Schermerhorn struct node_hstate node_hstates[MAX_NUMNODES]; 16099a305230SLee Schermerhorn 16109a305230SLee Schermerhorn /* 161110fbcf4cSKay Sievers * A subset of global hstate attributes for node devices 16129a305230SLee Schermerhorn */ 16139a305230SLee Schermerhorn static struct attribute *per_node_hstate_attrs[] = { 16149a305230SLee Schermerhorn &nr_hugepages_attr.attr, 16159a305230SLee Schermerhorn &free_hugepages_attr.attr, 16169a305230SLee Schermerhorn &surplus_hugepages_attr.attr, 16179a305230SLee Schermerhorn NULL, 16189a305230SLee Schermerhorn }; 16199a305230SLee Schermerhorn 16209a305230SLee Schermerhorn static struct attribute_group per_node_hstate_attr_group = { 16219a305230SLee Schermerhorn .attrs = per_node_hstate_attrs, 16229a305230SLee Schermerhorn }; 16239a305230SLee Schermerhorn 16249a305230SLee Schermerhorn /* 162510fbcf4cSKay Sievers * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 16269a305230SLee Schermerhorn * Returns node id via non-NULL nidp. 16279a305230SLee Schermerhorn */ 16289a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 16299a305230SLee Schermerhorn { 16309a305230SLee Schermerhorn int nid; 16319a305230SLee Schermerhorn 16329a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) { 16339a305230SLee Schermerhorn struct node_hstate *nhs = &node_hstates[nid]; 16349a305230SLee Schermerhorn int i; 16359a305230SLee Schermerhorn for (i = 0; i < HUGE_MAX_HSTATE; i++) 16369a305230SLee Schermerhorn if (nhs->hstate_kobjs[i] == kobj) { 16379a305230SLee Schermerhorn if (nidp) 16389a305230SLee Schermerhorn *nidp = nid; 16399a305230SLee Schermerhorn return &hstates[i]; 16409a305230SLee Schermerhorn } 16419a305230SLee Schermerhorn } 16429a305230SLee Schermerhorn 16439a305230SLee Schermerhorn BUG(); 16449a305230SLee Schermerhorn return NULL; 16459a305230SLee Schermerhorn } 16469a305230SLee Schermerhorn 16479a305230SLee Schermerhorn /* 164810fbcf4cSKay Sievers * Unregister hstate attributes from a single node device. 16499a305230SLee Schermerhorn * No-op if no hstate attributes attached. 
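 *
 * Teardown order, condensed from the body below: the per-hstate
 * kobjects are dropped first, then the node's "hugepages" parent
 * kobject, i.e. the reverse of the creation order in
 * hugetlb_register_node():
 *
 *	kobject_put(nhs->hstate_kobjs[h - hstates]);	(once per hstate)
 *	kobject_put(nhs->hugepages_kobj);		(parent, last)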
16509a305230SLee Schermerhorn */ 16519a305230SLee Schermerhorn void hugetlb_unregister_node(struct node *node) 16529a305230SLee Schermerhorn { 16539a305230SLee Schermerhorn struct hstate *h; 165410fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id]; 16559a305230SLee Schermerhorn 16569a305230SLee Schermerhorn if (!nhs->hugepages_kobj) 16579b5e5d0fSLee Schermerhorn return; /* no hstate attributes */ 16589a305230SLee Schermerhorn 16599a305230SLee Schermerhorn for_each_hstate(h) 16609a305230SLee Schermerhorn if (nhs->hstate_kobjs[h - hstates]) { 16619a305230SLee Schermerhorn kobject_put(nhs->hstate_kobjs[h - hstates]); 16629a305230SLee Schermerhorn nhs->hstate_kobjs[h - hstates] = NULL; 16639a305230SLee Schermerhorn } 16649a305230SLee Schermerhorn 16659a305230SLee Schermerhorn kobject_put(nhs->hugepages_kobj); 16669a305230SLee Schermerhorn nhs->hugepages_kobj = NULL; 16679a305230SLee Schermerhorn } 16689a305230SLee Schermerhorn 16699a305230SLee Schermerhorn /* 167010fbcf4cSKay Sievers * hugetlb module exit: unregister hstate attributes from node devices 16719a305230SLee Schermerhorn * that have them. 16729a305230SLee Schermerhorn */ 16739a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) 16749a305230SLee Schermerhorn { 16759a305230SLee Schermerhorn int nid; 16769a305230SLee Schermerhorn 16779a305230SLee Schermerhorn /* 167810fbcf4cSKay Sievers * disable node device registrations. 16799a305230SLee Schermerhorn */ 16809a305230SLee Schermerhorn register_hugetlbfs_with_node(NULL, NULL); 16819a305230SLee Schermerhorn 16829a305230SLee Schermerhorn /* 16839a305230SLee Schermerhorn * remove hstate attributes from any nodes that have them. 16849a305230SLee Schermerhorn */ 16859a305230SLee Schermerhorn for (nid = 0; nid < nr_node_ids; nid++) 16869a305230SLee Schermerhorn hugetlb_unregister_node(&node_devices[nid]); 16879a305230SLee Schermerhorn } 16889a305230SLee Schermerhorn 16899a305230SLee Schermerhorn /* 169010fbcf4cSKay Sievers * Register hstate attributes for a single node device. 16919a305230SLee Schermerhorn * No-op if attributes already registered. 
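 *
 * On success the node gains a subtree like the following (node
 * number and page size are illustrative, for a machine with a
 * single 2 MB hstate):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/surplus_hugepages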
16929a305230SLee Schermerhorn */ 16939a305230SLee Schermerhorn void hugetlb_register_node(struct node *node) 16949a305230SLee Schermerhorn { 16959a305230SLee Schermerhorn struct hstate *h; 169610fbcf4cSKay Sievers struct node_hstate *nhs = &node_hstates[node->dev.id]; 16979a305230SLee Schermerhorn int err; 16989a305230SLee Schermerhorn 16999a305230SLee Schermerhorn if (nhs->hugepages_kobj) 17009a305230SLee Schermerhorn return; /* already allocated */ 17019a305230SLee Schermerhorn 17029a305230SLee Schermerhorn nhs->hugepages_kobj = kobject_create_and_add("hugepages", 170310fbcf4cSKay Sievers &node->dev.kobj); 17049a305230SLee Schermerhorn if (!nhs->hugepages_kobj) 17059a305230SLee Schermerhorn return; 17069a305230SLee Schermerhorn 17079a305230SLee Schermerhorn for_each_hstate(h) { 17089a305230SLee Schermerhorn err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 17099a305230SLee Schermerhorn nhs->hstate_kobjs, 17109a305230SLee Schermerhorn &per_node_hstate_attr_group); 17119a305230SLee Schermerhorn if (err) { 17129a305230SLee Schermerhorn printk(KERN_ERR "Hugetlb: Unable to add hstate %s" 17139a305230SLee Schermerhorn " for node %d\n", 171410fbcf4cSKay Sievers h->name, node->dev.id); 17159a305230SLee Schermerhorn hugetlb_unregister_node(node); 17169a305230SLee Schermerhorn break; 17179a305230SLee Schermerhorn } 17189a305230SLee Schermerhorn } 17199a305230SLee Schermerhorn } 17209a305230SLee Schermerhorn 17219a305230SLee Schermerhorn /* 17229b5e5d0fSLee Schermerhorn * hugetlb init time: register hstate attributes for all registered node 172310fbcf4cSKay Sievers * devices of nodes that have memory. All on-line nodes should have 172410fbcf4cSKay Sievers * registered their associated device by this time. 17259a305230SLee Schermerhorn */ 17269a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) 17279a305230SLee Schermerhorn { 17289a305230SLee Schermerhorn int nid; 17299a305230SLee Schermerhorn 17309b5e5d0fSLee Schermerhorn for_each_node_state(nid, N_HIGH_MEMORY) { 17319a305230SLee Schermerhorn struct node *node = &node_devices[nid]; 173210fbcf4cSKay Sievers if (node->dev.id == nid) 17339a305230SLee Schermerhorn hugetlb_register_node(node); 17349a305230SLee Schermerhorn } 17359a305230SLee Schermerhorn 17369a305230SLee Schermerhorn /* 173710fbcf4cSKay Sievers * Let the node device driver know we're here so it can 17389a305230SLee Schermerhorn * [un]register hstate attributes on node hotplug. 
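 *
 * The handshake is just the pair of callbacks passed below; a sketch
 * of the helper's shape on the drivers/base/node.c side (assumed,
 * with node_registration_func_t taking a struct node *):
 *
 *	void register_hugetlbfs_with_node(node_registration_func_t doregister,
 *					  node_registration_func_t unregister);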
17399a305230SLee Schermerhorn */
17409a305230SLee Schermerhorn register_hugetlbfs_with_node(hugetlb_register_node,
17419a305230SLee Schermerhorn hugetlb_unregister_node);
17429a305230SLee Schermerhorn }
17439a305230SLee Schermerhorn #else /* !CONFIG_NUMA */
17449a305230SLee Schermerhorn 
17459a305230SLee Schermerhorn static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
17469a305230SLee Schermerhorn {
17479a305230SLee Schermerhorn BUG();
17489a305230SLee Schermerhorn if (nidp)
17499a305230SLee Schermerhorn *nidp = -1;
17509a305230SLee Schermerhorn return NULL;
17519a305230SLee Schermerhorn }
17529a305230SLee Schermerhorn 
17539a305230SLee Schermerhorn static void hugetlb_unregister_all_nodes(void) { }
17549a305230SLee Schermerhorn 
17559a305230SLee Schermerhorn static void hugetlb_register_all_nodes(void) { }
17569a305230SLee Schermerhorn 
17579a305230SLee Schermerhorn #endif
17589a305230SLee Schermerhorn 
1759a3437870SNishanth Aravamudan static void __exit hugetlb_exit(void)
1760a3437870SNishanth Aravamudan {
1761a3437870SNishanth Aravamudan struct hstate *h;
1762a3437870SNishanth Aravamudan 
17639a305230SLee Schermerhorn hugetlb_unregister_all_nodes();
17649a305230SLee Schermerhorn 
1765a3437870SNishanth Aravamudan for_each_hstate(h) {
1766a3437870SNishanth Aravamudan kobject_put(hstate_kobjs[h - hstates]);
1767a3437870SNishanth Aravamudan }
1768a3437870SNishanth Aravamudan 
1769a3437870SNishanth Aravamudan kobject_put(hugepages_kobj);
1770a3437870SNishanth Aravamudan }
1771a3437870SNishanth Aravamudan module_exit(hugetlb_exit);
1772a3437870SNishanth Aravamudan 
1773a3437870SNishanth Aravamudan static int __init hugetlb_init(void)
1774a3437870SNishanth Aravamudan {
17750ef89d25SBenjamin Herrenschmidt /* Some platforms decide whether they support huge pages at boot
17760ef89d25SBenjamin Herrenschmidt * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
17770ef89d25SBenjamin Herrenschmidt * there is no such support.
17780ef89d25SBenjamin Herrenschmidt */
17790ef89d25SBenjamin Herrenschmidt if (HPAGE_SHIFT == 0)
17800ef89d25SBenjamin Herrenschmidt return 0;
1781a3437870SNishanth Aravamudan 
1782e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) {
1783e11bfbfcSNick Piggin default_hstate_size = HPAGE_SIZE;
1784e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size))
1785a3437870SNishanth Aravamudan hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1786a3437870SNishanth Aravamudan }
1787e11bfbfcSNick Piggin default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1788e11bfbfcSNick Piggin if (default_hstate_max_huge_pages)
1789e11bfbfcSNick Piggin default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1790a3437870SNishanth Aravamudan 
1791a3437870SNishanth Aravamudan hugetlb_init_hstates();
1792a3437870SNishanth Aravamudan 
1793aa888a74SAndi Kleen gather_bootmem_prealloc();
1794aa888a74SAndi Kleen 
1795a3437870SNishanth Aravamudan report_hugepages();
1796a3437870SNishanth Aravamudan 
1797a3437870SNishanth Aravamudan hugetlb_sysfs_init();
1798a3437870SNishanth Aravamudan 
17999a305230SLee Schermerhorn hugetlb_register_all_nodes();
18009a305230SLee Schermerhorn 
1801a3437870SNishanth Aravamudan return 0;
1802a3437870SNishanth Aravamudan }
1803a3437870SNishanth Aravamudan module_init(hugetlb_init);
1804a3437870SNishanth Aravamudan 
1805a3437870SNishanth Aravamudan /* Should be called on processing a hugepagesz=...
option */ 1806a3437870SNishanth Aravamudan void __init hugetlb_add_hstate(unsigned order) 1807a3437870SNishanth Aravamudan { 1808a3437870SNishanth Aravamudan struct hstate *h; 18098faa8b07SAndi Kleen unsigned long i; 18108faa8b07SAndi Kleen 1811a3437870SNishanth Aravamudan if (size_to_hstate(PAGE_SIZE << order)) { 1812a3437870SNishanth Aravamudan printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n"); 1813a3437870SNishanth Aravamudan return; 1814a3437870SNishanth Aravamudan } 1815a3437870SNishanth Aravamudan BUG_ON(max_hstate >= HUGE_MAX_HSTATE); 1816a3437870SNishanth Aravamudan BUG_ON(order == 0); 1817a3437870SNishanth Aravamudan h = &hstates[max_hstate++]; 1818a3437870SNishanth Aravamudan h->order = order; 1819a3437870SNishanth Aravamudan h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 18208faa8b07SAndi Kleen h->nr_huge_pages = 0; 18218faa8b07SAndi Kleen h->free_huge_pages = 0; 18228faa8b07SAndi Kleen for (i = 0; i < MAX_NUMNODES; ++i) 18238faa8b07SAndi Kleen INIT_LIST_HEAD(&h->hugepage_freelists[i]); 18249b5e5d0fSLee Schermerhorn h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]); 18259b5e5d0fSLee Schermerhorn h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]); 1826a3437870SNishanth Aravamudan snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 1827a3437870SNishanth Aravamudan huge_page_size(h)/1024); 18288faa8b07SAndi Kleen 1829a3437870SNishanth Aravamudan parsed_hstate = h; 1830a3437870SNishanth Aravamudan } 1831a3437870SNishanth Aravamudan 1832e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s) 1833a3437870SNishanth Aravamudan { 1834a3437870SNishanth Aravamudan unsigned long *mhp; 18358faa8b07SAndi Kleen static unsigned long *last_mhp; 1836a3437870SNishanth Aravamudan 1837a3437870SNishanth Aravamudan /* 1838a3437870SNishanth Aravamudan * !max_hstate means we haven't parsed a hugepagesz= parameter yet, 1839a3437870SNishanth Aravamudan * so this hugepages= parameter goes to the "default hstate". 1840a3437870SNishanth Aravamudan */ 1841a3437870SNishanth Aravamudan if (!max_hstate) 1842a3437870SNishanth Aravamudan mhp = &default_hstate_max_huge_pages; 1843a3437870SNishanth Aravamudan else 1844a3437870SNishanth Aravamudan mhp = &parsed_hstate->max_huge_pages; 1845a3437870SNishanth Aravamudan 18468faa8b07SAndi Kleen if (mhp == last_mhp) { 18478faa8b07SAndi Kleen printk(KERN_WARNING "hugepages= specified twice without " 18488faa8b07SAndi Kleen "interleaving hugepagesz=, ignoring\n"); 18498faa8b07SAndi Kleen return 1; 18508faa8b07SAndi Kleen } 18518faa8b07SAndi Kleen 1852a3437870SNishanth Aravamudan if (sscanf(s, "%lu", mhp) <= 0) 1853a3437870SNishanth Aravamudan *mhp = 0; 1854a3437870SNishanth Aravamudan 18558faa8b07SAndi Kleen /* 18568faa8b07SAndi Kleen * Global state is always initialized later in hugetlb_init. 18578faa8b07SAndi Kleen * But we need to allocate >= MAX_ORDER hstates here early to still 18588faa8b07SAndi Kleen * use the bootmem allocator. 
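 *
 * For illustration (x86-style sizes, 4 KB base pages): on a command
 * line such as
 *
 *	hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * the 1 GB hstate has order >= MAX_ORDER, so its four pages are
 * carved out of bootmem right here, while the 512 2 MB pages wait
 * for hugetlb_init_hstates() to take them from the buddy allocator.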
18598faa8b07SAndi Kleen */ 18608faa8b07SAndi Kleen if (max_hstate && parsed_hstate->order >= MAX_ORDER) 18618faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(parsed_hstate); 18628faa8b07SAndi Kleen 18638faa8b07SAndi Kleen last_mhp = mhp; 18648faa8b07SAndi Kleen 1865a3437870SNishanth Aravamudan return 1; 1866a3437870SNishanth Aravamudan } 1867e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup); 1868e11bfbfcSNick Piggin 1869e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s) 1870e11bfbfcSNick Piggin { 1871e11bfbfcSNick Piggin default_hstate_size = memparse(s, &s); 1872e11bfbfcSNick Piggin return 1; 1873e11bfbfcSNick Piggin } 1874e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup); 1875a3437870SNishanth Aravamudan 18768a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array) 18778a213460SNishanth Aravamudan { 18788a213460SNishanth Aravamudan int node; 18798a213460SNishanth Aravamudan unsigned int nr = 0; 18808a213460SNishanth Aravamudan 18818a213460SNishanth Aravamudan for_each_node_mask(node, cpuset_current_mems_allowed) 18828a213460SNishanth Aravamudan nr += array[node]; 18838a213460SNishanth Aravamudan 18848a213460SNishanth Aravamudan return nr; 18858a213460SNishanth Aravamudan } 18868a213460SNishanth Aravamudan 18878a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL 188806808b08SLee Schermerhorn static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 188906808b08SLee Schermerhorn struct ctl_table *table, int write, 189006808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 18911da177e4SLinus Torvalds { 1892e5ff2159SAndi Kleen struct hstate *h = &default_hstate; 1893e5ff2159SAndi Kleen unsigned long tmp; 189408d4a246SMichal Hocko int ret; 1895e5ff2159SAndi Kleen 1896e5ff2159SAndi Kleen tmp = h->max_huge_pages; 1897e5ff2159SAndi Kleen 1898adbe8726SEric B Munson if (write && h->order >= MAX_ORDER) 1899adbe8726SEric B Munson return -EINVAL; 1900adbe8726SEric B Munson 1901e5ff2159SAndi Kleen table->data = &tmp; 1902e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 190308d4a246SMichal Hocko ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 190408d4a246SMichal Hocko if (ret) 190508d4a246SMichal Hocko goto out; 1906e5ff2159SAndi Kleen 190706808b08SLee Schermerhorn if (write) { 1908bad44b5bSDavid Rientjes NODEMASK_ALLOC(nodemask_t, nodes_allowed, 1909bad44b5bSDavid Rientjes GFP_KERNEL | __GFP_NORETRY); 191006808b08SLee Schermerhorn if (!(obey_mempolicy && 191106808b08SLee Schermerhorn init_nodemask_of_mempolicy(nodes_allowed))) { 191206808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 191306808b08SLee Schermerhorn nodes_allowed = &node_states[N_HIGH_MEMORY]; 191406808b08SLee Schermerhorn } 191506808b08SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed); 191606808b08SLee Schermerhorn 191706808b08SLee Schermerhorn if (nodes_allowed != &node_states[N_HIGH_MEMORY]) 191806808b08SLee Schermerhorn NODEMASK_FREE(nodes_allowed); 191906808b08SLee Schermerhorn } 192008d4a246SMichal Hocko out: 192108d4a246SMichal Hocko return ret; 19221da177e4SLinus Torvalds } 1923396faf03SMel Gorman 192406808b08SLee Schermerhorn int hugetlb_sysctl_handler(struct ctl_table *table, int write, 192506808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 192606808b08SLee Schermerhorn { 192706808b08SLee Schermerhorn 192806808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(false, table, write, 192906808b08SLee Schermerhorn buffer, length, 
ppos); 193006808b08SLee Schermerhorn } 193106808b08SLee Schermerhorn 193206808b08SLee Schermerhorn #ifdef CONFIG_NUMA 193306808b08SLee Schermerhorn int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 193406808b08SLee Schermerhorn void __user *buffer, size_t *length, loff_t *ppos) 193506808b08SLee Schermerhorn { 193606808b08SLee Schermerhorn return hugetlb_sysctl_handler_common(true, table, write, 193706808b08SLee Schermerhorn buffer, length, ppos); 193806808b08SLee Schermerhorn } 193906808b08SLee Schermerhorn #endif /* CONFIG_NUMA */ 194006808b08SLee Schermerhorn 1941396faf03SMel Gorman int hugetlb_treat_movable_handler(struct ctl_table *table, int write, 19428d65af78SAlexey Dobriyan void __user *buffer, 1943396faf03SMel Gorman size_t *length, loff_t *ppos) 1944396faf03SMel Gorman { 19458d65af78SAlexey Dobriyan proc_dointvec(table, write, buffer, length, ppos); 1946396faf03SMel Gorman if (hugepages_treat_as_movable) 1947396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER_MOVABLE; 1948396faf03SMel Gorman else 1949396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER; 1950396faf03SMel Gorman return 0; 1951396faf03SMel Gorman } 1952396faf03SMel Gorman 1953a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write, 19548d65af78SAlexey Dobriyan void __user *buffer, 1955a3d0c6aaSNishanth Aravamudan size_t *length, loff_t *ppos) 1956a3d0c6aaSNishanth Aravamudan { 1957a5516438SAndi Kleen struct hstate *h = &default_hstate; 1958e5ff2159SAndi Kleen unsigned long tmp; 195908d4a246SMichal Hocko int ret; 1960e5ff2159SAndi Kleen 1961e5ff2159SAndi Kleen tmp = h->nr_overcommit_huge_pages; 1962e5ff2159SAndi Kleen 1963adbe8726SEric B Munson if (write && h->order >= MAX_ORDER) 1964adbe8726SEric B Munson return -EINVAL; 1965adbe8726SEric B Munson 1966e5ff2159SAndi Kleen table->data = &tmp; 1967e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 196808d4a246SMichal Hocko ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); 196908d4a246SMichal Hocko if (ret) 197008d4a246SMichal Hocko goto out; 1971e5ff2159SAndi Kleen 1972e5ff2159SAndi Kleen if (write) { 1973064d9efeSNishanth Aravamudan spin_lock(&hugetlb_lock); 1974e5ff2159SAndi Kleen h->nr_overcommit_huge_pages = tmp; 1975a3d0c6aaSNishanth Aravamudan spin_unlock(&hugetlb_lock); 1976e5ff2159SAndi Kleen } 197708d4a246SMichal Hocko out: 197808d4a246SMichal Hocko return ret; 1979a3d0c6aaSNishanth Aravamudan } 1980a3d0c6aaSNishanth Aravamudan 19811da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 19821da177e4SLinus Torvalds 1983e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m) 19841da177e4SLinus Torvalds { 1985a5516438SAndi Kleen struct hstate *h = &default_hstate; 1986e1759c21SAlexey Dobriyan seq_printf(m, 19871da177e4SLinus Torvalds "HugePages_Total: %5lu\n" 19881da177e4SLinus Torvalds "HugePages_Free: %5lu\n" 1989b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n" 19907893d1d5SAdam Litke "HugePages_Surp: %5lu\n" 19914f98a2feSRik van Riel "Hugepagesize: %8lu kB\n", 1992a5516438SAndi Kleen h->nr_huge_pages, 1993a5516438SAndi Kleen h->free_huge_pages, 1994a5516438SAndi Kleen h->resv_huge_pages, 1995a5516438SAndi Kleen h->surplus_huge_pages, 1996a5516438SAndi Kleen 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 19971da177e4SLinus Torvalds } 19981da177e4SLinus Torvalds 19991da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf) 20001da177e4SLinus Torvalds { 2001a5516438SAndi Kleen struct hstate *h = &default_hstate; 20021da177e4SLinus Torvalds return 
sprintf(buf,
20031da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n"
2004a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n"
2005a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n",
2006a5516438SAndi Kleen nid, h->nr_huge_pages_node[nid],
2007a5516438SAndi Kleen nid, h->free_huge_pages_node[nid],
2008a5516438SAndi Kleen nid, h->surplus_huge_pages_node[nid]);
20091da177e4SLinus Torvalds }
20101da177e4SLinus Torvalds 
20111da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
20121da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void)
20131da177e4SLinus Torvalds {
2014a5516438SAndi Kleen struct hstate *h = &default_hstate;
2015a5516438SAndi Kleen return h->nr_huge_pages * pages_per_huge_page(h);
20161da177e4SLinus Torvalds }
20171da177e4SLinus Torvalds 
2018a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta)
2019fc1b8a73SMel Gorman {
2020fc1b8a73SMel Gorman int ret = -ENOMEM;
2021fc1b8a73SMel Gorman 
2022fc1b8a73SMel Gorman spin_lock(&hugetlb_lock);
2023fc1b8a73SMel Gorman /*
2024fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page
2025fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such
2026fc1b8a73SMel Gorman * a reservation is completely rubbish in the presence of cpuset because
2027fc1b8a73SMel Gorman * the reservation is not checked against page availability for the
2028fc1b8a73SMel Gorman * current cpuset. An application can still potentially be OOM'ed by the
2029fc1b8a73SMel Gorman * kernel for lack of free htlb pages in the cpuset that the task is in.
2030fc1b8a73SMel Gorman * Attempting to enforce strict accounting with cpuset is almost
2031fc1b8a73SMel Gorman * impossible (or too ugly) because cpusets are so fluid that
2032fc1b8a73SMel Gorman * tasks or memory nodes can be dynamically moved between them.
2033fc1b8a73SMel Gorman *
2034fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mappings with cpuset is
2035fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics,
2036fc1b8a73SMel Gorman * we fall back to checking against current free page availability as
2037fc1b8a73SMel Gorman * a best attempt and hopefully to minimize the impact of changing
2038fc1b8a73SMel Gorman * semantics that cpuset has.
2039fc1b8a73SMel Gorman */
2040fc1b8a73SMel Gorman if (delta > 0) {
2041a5516438SAndi Kleen if (gather_surplus_pages(h, delta) < 0)
2042fc1b8a73SMel Gorman goto out;
2043fc1b8a73SMel Gorman 
2044a5516438SAndi Kleen if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2045a5516438SAndi Kleen return_unused_surplus_pages(h, delta);
2046fc1b8a73SMel Gorman goto out;
2047fc1b8a73SMel Gorman }
2048fc1b8a73SMel Gorman }
2049fc1b8a73SMel Gorman 
2050fc1b8a73SMel Gorman ret = 0;
2051fc1b8a73SMel Gorman if (delta < 0)
2052a5516438SAndi Kleen return_unused_surplus_pages(h, (unsigned long) -delta);
2053fc1b8a73SMel Gorman 
2054fc1b8a73SMel Gorman out:
2055fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock);
2056fc1b8a73SMel Gorman return ret;
2057fc1b8a73SMel Gorman }
2058fc1b8a73SMel Gorman 
205984afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma)
206084afd99bSAndy Whitcroft {
206184afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma);
206284afd99bSAndy Whitcroft 
206384afd99bSAndy Whitcroft /*
206484afd99bSAndy Whitcroft * This new VMA should share its sibling's reservation map if present.
206584afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where
206684afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA
206725985edcSLucas De Marchi * has a reference to the reservation map it cannot disappear until
206884afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a
206984afd99bSAndy Whitcroft * new reference here without additional locking.
207084afd99bSAndy Whitcroft */
207184afd99bSAndy Whitcroft if (reservations)
207284afd99bSAndy Whitcroft kref_get(&reservations->refs);
207384afd99bSAndy Whitcroft }
207484afd99bSAndy Whitcroft 
2075a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2076a1e78772SMel Gorman {
2077a5516438SAndi Kleen struct hstate *h = hstate_vma(vma);
207884afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma);
207984afd99bSAndy Whitcroft unsigned long reserve;
208084afd99bSAndy Whitcroft unsigned long start;
208184afd99bSAndy Whitcroft unsigned long end;
208284afd99bSAndy Whitcroft 
208384afd99bSAndy Whitcroft if (reservations) {
2084a5516438SAndi Kleen start = vma_hugecache_offset(h, vma, vma->vm_start);
2085a5516438SAndi Kleen end = vma_hugecache_offset(h, vma, vma->vm_end);
208684afd99bSAndy Whitcroft 
208784afd99bSAndy Whitcroft reserve = (end - start) -
208884afd99bSAndy Whitcroft region_count(&reservations->regions, start, end);
208984afd99bSAndy Whitcroft 
209084afd99bSAndy Whitcroft kref_put(&reservations->refs, resv_map_release);
209184afd99bSAndy Whitcroft 
20927251ff78SAdam Litke if (reserve) {
2093a5516438SAndi Kleen hugetlb_acct_memory(h, -reserve);
20947251ff78SAdam Litke hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
20957251ff78SAdam Litke }
2096a1e78772SMel Gorman }
209784afd99bSAndy Whitcroft }
2098a1e78772SMel Gorman 
20991da177e4SLinus Torvalds /*
21001da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. They cause
21011da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the
21021da177e4SLinus Torvalds * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
21031da177e4SLinus Torvalds * this far.
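 *
 * For orientation, real hugetlb faults are diverted well before the
 * ->fault path is reached; a sketch of the check in mm/memory.c's
 * handle_mm_fault():
 *
 *	if (unlikely(is_vm_hugetlb_page(vma)))
 *		return hugetlb_fault(mm, vma, address, flags);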
21041da177e4SLinus Torvalds */ 2105d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 21061da177e4SLinus Torvalds { 21071da177e4SLinus Torvalds BUG(); 2108d0217ac0SNick Piggin return 0; 21091da177e4SLinus Torvalds } 21101da177e4SLinus Torvalds 2111f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = { 2112d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 211384afd99bSAndy Whitcroft .open = hugetlb_vm_op_open, 2114a1e78772SMel Gorman .close = hugetlb_vm_op_close, 21151da177e4SLinus Torvalds }; 21161da177e4SLinus Torvalds 21171e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 21181e8f889bSDavid Gibson int writable) 211963551ae0SDavid Gibson { 212063551ae0SDavid Gibson pte_t entry; 212163551ae0SDavid Gibson 21221e8f889bSDavid Gibson if (writable) { 212363551ae0SDavid Gibson entry = 212463551ae0SDavid Gibson pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); 212563551ae0SDavid Gibson } else { 21267f2e9525SGerald Schaefer entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot)); 212763551ae0SDavid Gibson } 212863551ae0SDavid Gibson entry = pte_mkyoung(entry); 212963551ae0SDavid Gibson entry = pte_mkhuge(entry); 213063551ae0SDavid Gibson 213163551ae0SDavid Gibson return entry; 213263551ae0SDavid Gibson } 213363551ae0SDavid Gibson 21341e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 21351e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 21361e8f889bSDavid Gibson { 21371e8f889bSDavid Gibson pte_t entry; 21381e8f889bSDavid Gibson 21397f2e9525SGerald Schaefer entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); 214032f84528SChris Forbes if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 21414b3073e1SRussell King update_mmu_cache(vma, address, ptep); 21421e8f889bSDavid Gibson } 21431e8f889bSDavid Gibson 21441e8f889bSDavid Gibson 214563551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 214663551ae0SDavid Gibson struct vm_area_struct *vma) 214763551ae0SDavid Gibson { 214863551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 214963551ae0SDavid Gibson struct page *ptepage; 21501c59827dSHugh Dickins unsigned long addr; 21511e8f889bSDavid Gibson int cow; 2152a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2153a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 21541e8f889bSDavid Gibson 21551e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 215663551ae0SDavid Gibson 2157a5516438SAndi Kleen for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 2158c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 2159c74df32cSHugh Dickins if (!src_pte) 2160c74df32cSHugh Dickins continue; 2161a5516438SAndi Kleen dst_pte = huge_pte_alloc(dst, addr, sz); 216263551ae0SDavid Gibson if (!dst_pte) 216363551ae0SDavid Gibson goto nomem; 2164c5c99429SLarry Woodman 2165c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 2166c5c99429SLarry Woodman if (dst_pte == src_pte) 2167c5c99429SLarry Woodman continue; 2168c5c99429SLarry Woodman 2169c74df32cSHugh Dickins spin_lock(&dst->page_table_lock); 217046478758SNick Piggin spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); 21717f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(src_pte))) { 21721e8f889bSDavid Gibson if (cow) 21737f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte); 21747f2e9525SGerald Schaefer entry = 
huge_ptep_get(src_pte); 217563551ae0SDavid Gibson ptepage = pte_page(entry); 217663551ae0SDavid Gibson get_page(ptepage); 21770fe6e20bSNaoya Horiguchi page_dup_rmap(ptepage); 217863551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 21791c59827dSHugh Dickins } 21801c59827dSHugh Dickins spin_unlock(&src->page_table_lock); 2181c74df32cSHugh Dickins spin_unlock(&dst->page_table_lock); 218263551ae0SDavid Gibson } 218363551ae0SDavid Gibson return 0; 218463551ae0SDavid Gibson 218563551ae0SDavid Gibson nomem: 218663551ae0SDavid Gibson return -ENOMEM; 218763551ae0SDavid Gibson } 218863551ae0SDavid Gibson 2189290408d4SNaoya Horiguchi static int is_hugetlb_entry_migration(pte_t pte) 2190290408d4SNaoya Horiguchi { 2191290408d4SNaoya Horiguchi swp_entry_t swp; 2192290408d4SNaoya Horiguchi 2193290408d4SNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte)) 2194290408d4SNaoya Horiguchi return 0; 2195290408d4SNaoya Horiguchi swp = pte_to_swp_entry(pte); 219632f84528SChris Forbes if (non_swap_entry(swp) && is_migration_entry(swp)) 2197290408d4SNaoya Horiguchi return 1; 219832f84528SChris Forbes else 2199290408d4SNaoya Horiguchi return 0; 2200290408d4SNaoya Horiguchi } 2201290408d4SNaoya Horiguchi 2202fd6a03edSNaoya Horiguchi static int is_hugetlb_entry_hwpoisoned(pte_t pte) 2203fd6a03edSNaoya Horiguchi { 2204fd6a03edSNaoya Horiguchi swp_entry_t swp; 2205fd6a03edSNaoya Horiguchi 2206fd6a03edSNaoya Horiguchi if (huge_pte_none(pte) || pte_present(pte)) 2207fd6a03edSNaoya Horiguchi return 0; 2208fd6a03edSNaoya Horiguchi swp = pte_to_swp_entry(pte); 220932f84528SChris Forbes if (non_swap_entry(swp) && is_hwpoison_entry(swp)) 2210fd6a03edSNaoya Horiguchi return 1; 221132f84528SChris Forbes else 2212fd6a03edSNaoya Horiguchi return 0; 2213fd6a03edSNaoya Horiguchi } 2214fd6a03edSNaoya Horiguchi 2215502717f4SChen, Kenneth W void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 221604f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 221763551ae0SDavid Gibson { 221863551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 221963551ae0SDavid Gibson unsigned long address; 2220c7546f8fSDavid Gibson pte_t *ptep; 222163551ae0SDavid Gibson pte_t pte; 222263551ae0SDavid Gibson struct page *page; 2223fe1668aeSChen, Kenneth W struct page *tmp; 2224a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2225a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 2226a5516438SAndi Kleen 2227c0a499c2SChen, Kenneth W /* 22283d48ae45SPeter Zijlstra * A page gathering list, protected by per file i_mmap_mutex. The 2229c0a499c2SChen, Kenneth W * lock is used to avoid list corruption from multiple unmapping 2230c0a499c2SChen, Kenneth W * of the same page since we are using page->lru. 
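 *
 * In outline the unmap is two-phase: pages are gathered under the
 * page table lock and only released after the TLB flush (condensed
 * from the code below):
 *
 *	spin_lock(&mm->page_table_lock);
 *	... clear each PTE and add its page to page_list ...
 *	flush_tlb_range(vma, start, end);
 *	spin_unlock(&mm->page_table_lock);
 *	list_for_each_entry_safe(page, tmp, &page_list, lru) {
 *		page_remove_rmap(page);
 *		list_del(&page->lru);
 *		put_page(page);
 *	}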
2231c0a499c2SChen, Kenneth W */ 2232fe1668aeSChen, Kenneth W LIST_HEAD(page_list); 223363551ae0SDavid Gibson 223463551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma)); 2235a5516438SAndi Kleen BUG_ON(start & ~huge_page_mask(h)); 2236a5516438SAndi Kleen BUG_ON(end & ~huge_page_mask(h)); 223763551ae0SDavid Gibson 2238cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_start(mm, start, end); 2239508034a3SHugh Dickins spin_lock(&mm->page_table_lock); 2240a5516438SAndi Kleen for (address = start; address < end; address += sz) { 2241c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address); 2242c7546f8fSDavid Gibson if (!ptep) 2243c7546f8fSDavid Gibson continue; 2244c7546f8fSDavid Gibson 224539dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 224639dde65cSChen, Kenneth W continue; 224739dde65cSChen, Kenneth W 224804f2cbe3SMel Gorman /* 224904f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific 225004f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we 225104f2cbe3SMel Gorman * are about to unmap is the actual page of interest. 225204f2cbe3SMel Gorman */ 225304f2cbe3SMel Gorman if (ref_page) { 225404f2cbe3SMel Gorman pte = huge_ptep_get(ptep); 225504f2cbe3SMel Gorman if (huge_pte_none(pte)) 225604f2cbe3SMel Gorman continue; 225704f2cbe3SMel Gorman page = pte_page(pte); 225804f2cbe3SMel Gorman if (page != ref_page) 225904f2cbe3SMel Gorman continue; 226004f2cbe3SMel Gorman 226104f2cbe3SMel Gorman /* 226204f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that 226304f2cbe3SMel Gorman * future faults in this VMA will fail rather than 226404f2cbe3SMel Gorman * looking like data was lost 226504f2cbe3SMel Gorman */ 226604f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 226704f2cbe3SMel Gorman } 226804f2cbe3SMel Gorman 2269c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep); 22707f2e9525SGerald Schaefer if (huge_pte_none(pte)) 227163551ae0SDavid Gibson continue; 2272c7546f8fSDavid Gibson 2273fd6a03edSNaoya Horiguchi /* 2274fd6a03edSNaoya Horiguchi * A HWPoisoned hugepage is already unmapped and its reference dropped 2275fd6a03edSNaoya Horiguchi */ 2276fd6a03edSNaoya Horiguchi if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) 2277fd6a03edSNaoya Horiguchi continue; 2278fd6a03edSNaoya Horiguchi 227963551ae0SDavid Gibson page = pte_page(pte); 22806649a386SKen Chen if (pte_dirty(pte)) 22816649a386SKen Chen set_page_dirty(page); 2282fe1668aeSChen, Kenneth W list_add(&page->lru, &page_list); 228363551ae0SDavid Gibson } 2284508034a3SHugh Dickins flush_tlb_range(vma, start, end); 2285cd2934a3SAl Viro spin_unlock(&mm->page_table_lock); 2286cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_end(mm, start, end); 2287fe1668aeSChen, Kenneth W list_for_each_entry_safe(page, tmp, &page_list, lru) { 22880fe6e20bSNaoya Horiguchi page_remove_rmap(page); 2289fe1668aeSChen, Kenneth W list_del(&page->lru); 2290fe1668aeSChen, Kenneth W put_page(page); 2291fe1668aeSChen, Kenneth W } 22921da177e4SLinus Torvalds } 229363551ae0SDavid Gibson 2294502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 229504f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 2296502717f4SChen, Kenneth W { 22973d48ae45SPeter Zijlstra mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); 229804f2cbe3SMel Gorman __unmap_hugepage_range(vma, start, end, ref_page); 22993d48ae45SPeter Zijlstra mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); 2300502717f4SChen, Kenneth W } 2301502717f4SChen,
Kenneth W 230204f2cbe3SMel Gorman /* 230304f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE 230404f2cbe3SMel Gorman * mapping it owns the reserve page for. The intention is to unmap the page 230504f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the 230604f2cbe3SMel Gorman * same region. 230704f2cbe3SMel Gorman */ 23082a4b3dedSHarvey Harrison static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 23092a4b3dedSHarvey Harrison struct page *page, unsigned long address) 231004f2cbe3SMel Gorman { 23117526674dSAdam Litke struct hstate *h = hstate_vma(vma); 231204f2cbe3SMel Gorman struct vm_area_struct *iter_vma; 231304f2cbe3SMel Gorman struct address_space *mapping; 231404f2cbe3SMel Gorman struct prio_tree_iter iter; 231504f2cbe3SMel Gorman pgoff_t pgoff; 231604f2cbe3SMel Gorman 231704f2cbe3SMel Gorman /* 231804f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation 231904f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units. 232004f2cbe3SMel Gorman */ 23217526674dSAdam Litke address = address & huge_page_mask(h); 23220c176d52SHillf Danton pgoff = vma_hugecache_offset(h, vma, address); 232304f2cbe3SMel Gorman mapping = (struct address_space *)page_private(page); 232404f2cbe3SMel Gorman 23254eb2b1dcSMel Gorman /* 23264eb2b1dcSMel Gorman * Take the mapping lock for the duration of the table walk. As 23274eb2b1dcSMel Gorman * this mapping should be shared between all the VMAs, 23284eb2b1dcSMel Gorman * __unmap_hugepage_range() is called with the lock already held. 23294eb2b1dcSMel Gorman */ 23303d48ae45SPeter Zijlstra mutex_lock(&mapping->i_mmap_mutex); 233104f2cbe3SMel Gorman vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 233204f2cbe3SMel Gorman /* Do not unmap the current VMA */ 233304f2cbe3SMel Gorman if (iter_vma == vma) 233404f2cbe3SMel Gorman continue; 233504f2cbe3SMel Gorman 233604f2cbe3SMel Gorman /* 233704f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves. 233804f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these 233904f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA 234004f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing 234104f2cbe3SMel Gorman * from the time of fork. This would look like data corruption. 234204f2cbe3SMel Gorman */ 234304f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 23444eb2b1dcSMel Gorman __unmap_hugepage_range(iter_vma, 23457526674dSAdam Litke address, address + huge_page_size(h), 234604f2cbe3SMel Gorman page); 234704f2cbe3SMel Gorman } 23483d48ae45SPeter Zijlstra mutex_unlock(&mapping->i_mmap_mutex); 234904f2cbe3SMel Gorman 235004f2cbe3SMel Gorman return 1; 235104f2cbe3SMel Gorman } 235204f2cbe3SMel Gorman 23530fe6e20bSNaoya Horiguchi /* 23540fe6e20bSNaoya Horiguchi * Hugetlb_cow() should be called with the page lock of the original hugepage held. 2355ef009b25SMichal Hocko * Called with hugetlb_instantiation_mutex held and pte_page locked so we 2356ef009b25SMichal Hocko * cannot race with other handlers or page migration. 2357ef009b25SMichal Hocko * Keep the pte_same checks anyway to make the transition from the mutex easier.
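 *
 * The expected calling context, as set up by hugetlb_fault() below
 * (a simplified sketch for orientation, not an additional requirement):
 *
 *	mutex_lock(&hugetlb_instantiation_mutex);
 *	lock_page(pte_page(entry));
 *	spin_lock(&mm->page_table_lock);
 *	ret = hugetlb_cow(mm, vma, address, ptep, entry, pagecache_page);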
23580fe6e20bSNaoya Horiguchi */ 23591e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 236004f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte, 236104f2cbe3SMel Gorman struct page *pagecache_page) 23621e8f889bSDavid Gibson { 2363a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 23641e8f889bSDavid Gibson struct page *old_page, *new_page; 236579ac6ba4SDavid Gibson int avoidcopy; 236604f2cbe3SMel Gorman int outside_reserve = 0; 23671e8f889bSDavid Gibson 23681e8f889bSDavid Gibson old_page = pte_page(pte); 23691e8f889bSDavid Gibson 237004f2cbe3SMel Gorman retry_avoidcopy: 23711e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy 23721e8f889bSDavid Gibson * and just make the page writable */ 23730fe6e20bSNaoya Horiguchi avoidcopy = (page_mapcount(old_page) == 1); 23741e8f889bSDavid Gibson if (avoidcopy) { 23750fe6e20bSNaoya Horiguchi if (PageAnon(old_page)) 23760fe6e20bSNaoya Horiguchi page_move_anon_rmap(old_page, vma, address); 23771e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep); 237883c54070SNick Piggin return 0; 23791e8f889bSDavid Gibson } 23801e8f889bSDavid Gibson 238104f2cbe3SMel Gorman /* 238204f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to 238304f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy 238404f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache 238504f2cbe3SMel Gorman * page is used to determine if the reserve at this address was 238604f2cbe3SMel Gorman * consumed or not. If reserves were used, a partially faulted mapping 238704f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead 238804f2cbe3SMel Gorman * of the full address range. 238904f2cbe3SMel Gorman */ 2390f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE) && 239104f2cbe3SMel Gorman is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 239204f2cbe3SMel Gorman old_page != pagecache_page) 239304f2cbe3SMel Gorman outside_reserve = 1; 239404f2cbe3SMel Gorman 23951e8f889bSDavid Gibson page_cache_get(old_page); 2396b76c8cfbSLarry Woodman 2397b76c8cfbSLarry Woodman /* Drop page_table_lock as the buddy allocator may be called */ 2398b76c8cfbSLarry Woodman spin_unlock(&mm->page_table_lock); 239904f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve); 24001e8f889bSDavid Gibson 24012fc39cecSAdam Litke if (IS_ERR(new_page)) { 24021e8f889bSDavid Gibson page_cache_release(old_page); 240304f2cbe3SMel Gorman 240404f2cbe3SMel Gorman /* 240504f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW, 240604f2cbe3SMel Gorman * it is due to references held by a child and an insufficient 240704f2cbe3SMel Gorman * huge page pool. To guarantee the original mapper's 240804f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child 240904f2cbe3SMel Gorman * may get SIGKILLed if it later faults.
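 *
 * Roughly, the recovery path below is (simplified):
 *
 *	if (unmap_ref_private(mm, vma, old_page, address)) {
 *		spin_lock(&mm->page_table_lock);
 *		if (likely(pte_same(huge_ptep_get(ptep), pte)))
 *			goto retry_avoidcopy;	/* now the sole user */
 *	}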
241004f2cbe3SMel Gorman */ 241104f2cbe3SMel Gorman if (outside_reserve) { 241204f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 241304f2cbe3SMel Gorman if (unmap_ref_private(mm, vma, old_page, address)) { 241404f2cbe3SMel Gorman BUG_ON(page_count(old_page) != 1); 241504f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 2416b76c8cfbSLarry Woodman spin_lock(&mm->page_table_lock); 2417a734bcc8SHillf Danton ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 2418a734bcc8SHillf Danton if (likely(pte_same(huge_ptep_get(ptep), pte))) 241904f2cbe3SMel Gorman goto retry_avoidcopy; 2420a734bcc8SHillf Danton /* 2421a734bcc8SHillf Danton * A racing update occurred while re-acquiring page_table_lock, 2422a734bcc8SHillf Danton * and our job is done. 2423a734bcc8SHillf Danton */ 2424a734bcc8SHillf Danton return 0; 242504f2cbe3SMel Gorman } 242604f2cbe3SMel Gorman WARN_ON_ONCE(1); 242704f2cbe3SMel Gorman } 242804f2cbe3SMel Gorman 2429b76c8cfbSLarry Woodman /* Caller expects lock to be held */ 2430b76c8cfbSLarry Woodman spin_lock(&mm->page_table_lock); 24312fc39cecSAdam Litke return -PTR_ERR(new_page); 24321e8f889bSDavid Gibson } 24331e8f889bSDavid Gibson 24340fe6e20bSNaoya Horiguchi /* 24350fe6e20bSNaoya Horiguchi * When the original hugepage is a shared one, it does not have 24360fe6e20bSNaoya Horiguchi * anon_vma prepared. 24370fe6e20bSNaoya Horiguchi */ 243844e2aa93SDean Nelson if (unlikely(anon_vma_prepare(vma))) { 2439ea4039a3SHillf Danton page_cache_release(new_page); 2440ea4039a3SHillf Danton page_cache_release(old_page); 244144e2aa93SDean Nelson /* Caller expects lock to be held */ 244244e2aa93SDean Nelson spin_lock(&mm->page_table_lock); 24430fe6e20bSNaoya Horiguchi return VM_FAULT_OOM; 244444e2aa93SDean Nelson } 24450fe6e20bSNaoya Horiguchi 244647ad8475SAndrea Arcangeli copy_user_huge_page(new_page, old_page, address, vma, 244747ad8475SAndrea Arcangeli pages_per_huge_page(h)); 24480ed361deSNick Piggin __SetPageUptodate(new_page); 24491e8f889bSDavid Gibson 2450b76c8cfbSLarry Woodman /* 2451b76c8cfbSLarry Woodman * Retake the page_table_lock to check for racing updates 2452b76c8cfbSLarry Woodman * before the page tables are altered 2453b76c8cfbSLarry Woodman */ 2454b76c8cfbSLarry Woodman spin_lock(&mm->page_table_lock); 2455a5516438SAndi Kleen ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 24567f2e9525SGerald Schaefer if (likely(pte_same(huge_ptep_get(ptep), pte))) { 24571e8f889bSDavid Gibson /* Break COW */ 24583edd4fc9SDoug Doan mmu_notifier_invalidate_range_start(mm, 24593edd4fc9SDoug Doan address & huge_page_mask(h), 24603edd4fc9SDoug Doan (address & huge_page_mask(h)) + huge_page_size(h)); 24618fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep); 24621e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, 24631e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1)); 24640fe6e20bSNaoya Horiguchi page_remove_rmap(old_page); 2465cd67f0d2SNaoya Horiguchi hugepage_add_new_anon_rmap(new_page, vma, address); 24661e8f889bSDavid Gibson /* Make the old page be freed below */ 24671e8f889bSDavid Gibson new_page = old_page; 24683edd4fc9SDoug Doan mmu_notifier_invalidate_range_end(mm, 24693edd4fc9SDoug Doan address & huge_page_mask(h), 24703edd4fc9SDoug Doan (address & huge_page_mask(h)) + huge_page_size(h)); 24711e8f889bSDavid Gibson } 24721e8f889bSDavid Gibson page_cache_release(new_page); 24731e8f889bSDavid Gibson page_cache_release(old_page); 247483c54070SNick Piggin return 0; 24751e8f889bSDavid Gibson } 24761e8f889bSDavid Gibson 247704f2cbe3SMel Gorman /* Return the pagecache page at a given
address within a VMA */ 2478a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h, 2479a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 248004f2cbe3SMel Gorman { 248104f2cbe3SMel Gorman struct address_space *mapping; 2482e7c4b0bfSAndy Whitcroft pgoff_t idx; 248304f2cbe3SMel Gorman 248404f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 2485a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 248604f2cbe3SMel Gorman 248704f2cbe3SMel Gorman return find_lock_page(mapping, idx); 248804f2cbe3SMel Gorman } 248904f2cbe3SMel Gorman 24903ae77f43SHugh Dickins /* 24913ae77f43SHugh Dickins * Return whether there is a pagecache page to back the given address within the VMA. 24923ae77f43SHugh Dickins * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 24933ae77f43SHugh Dickins */ 24943ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h, 24952a15efc9SHugh Dickins struct vm_area_struct *vma, unsigned long address) 24962a15efc9SHugh Dickins { 24972a15efc9SHugh Dickins struct address_space *mapping; 24982a15efc9SHugh Dickins pgoff_t idx; 24992a15efc9SHugh Dickins struct page *page; 25002a15efc9SHugh Dickins 25012a15efc9SHugh Dickins mapping = vma->vm_file->f_mapping; 25022a15efc9SHugh Dickins idx = vma_hugecache_offset(h, vma, address); 25032a15efc9SHugh Dickins 25042a15efc9SHugh Dickins page = find_get_page(mapping, idx); 25052a15efc9SHugh Dickins if (page) 25062a15efc9SHugh Dickins put_page(page); 25072a15efc9SHugh Dickins return page != NULL; 25082a15efc9SHugh Dickins } 25092a15efc9SHugh Dickins 2510a1ed3ddaSRobert P. J. Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 2511788c7df4SHugh Dickins unsigned long address, pte_t *ptep, unsigned int flags) 2512ac9b9c66SHugh Dickins { 2513a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2514ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 2515409eb8c2SHillf Danton int anon_rmap = 0; 2516e7c4b0bfSAndy Whitcroft pgoff_t idx; 25174c887265SAdam Litke unsigned long size; 25184c887265SAdam Litke struct page *page; 25194c887265SAdam Litke struct address_space *mapping; 25201e8f889bSDavid Gibson pte_t new_pte; 25214c887265SAdam Litke 252204f2cbe3SMel Gorman /* 252304f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event that the 252404f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 252525985edcSLucas De Marchi * COW. Warn that such a situation has occurred as it may not be obvious 252604f2cbe3SMel Gorman */ 252704f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 252804f2cbe3SMel Gorman printk(KERN_WARNING 252904f2cbe3SMel Gorman "PID %d killed due to inadequate hugepage pool\n", 253004f2cbe3SMel Gorman current->pid); 253104f2cbe3SMel Gorman return ret; 253204f2cbe3SMel Gorman } 253304f2cbe3SMel Gorman 25344c887265SAdam Litke mapping = vma->vm_file->f_mapping; 2535a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 25364c887265SAdam Litke 25374c887265SAdam Litke /* 25384c887265SAdam Litke * Use the page lock to guard against racing truncation 25394c887265SAdam Litke * before we get page_table_lock.
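 *
 * That is, in outline:
 *
 *	page = find_lock_page(mapping, idx);	/* or allocate + lock */
 *	...
 *	spin_lock(&mm->page_table_lock);
 *	if (idx >= i_size_read(mapping->host) >> huge_page_shift(h))
 *		goto backout;			/* raced with truncation */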
25404c887265SAdam Litke */ 25416bda666aSChristoph Lameter retry: 25426bda666aSChristoph Lameter page = find_lock_page(mapping, idx); 25436bda666aSChristoph Lameter if (!page) { 2544a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 2545ebed4bfcSHugh Dickins if (idx >= size) 2546ebed4bfcSHugh Dickins goto out; 254704f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0); 25482fc39cecSAdam Litke if (IS_ERR(page)) { 25492fc39cecSAdam Litke ret = -PTR_ERR(page); 25506bda666aSChristoph Lameter goto out; 25516bda666aSChristoph Lameter } 255247ad8475SAndrea Arcangeli clear_huge_page(page, address, pages_per_huge_page(h)); 25530ed361deSNick Piggin __SetPageUptodate(page); 2554ac9b9c66SHugh Dickins 2555f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) { 25566bda666aSChristoph Lameter int err; 255745c682a6SKen Chen struct inode *inode = mapping->host; 25586bda666aSChristoph Lameter 25596bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 25606bda666aSChristoph Lameter if (err) { 25616bda666aSChristoph Lameter put_page(page); 25626bda666aSChristoph Lameter if (err == -EEXIST) 25636bda666aSChristoph Lameter goto retry; 25646bda666aSChristoph Lameter goto out; 25656bda666aSChristoph Lameter } 256645c682a6SKen Chen 256745c682a6SKen Chen spin_lock(&inode->i_lock); 2568a5516438SAndi Kleen inode->i_blocks += blocks_per_huge_page(h); 256945c682a6SKen Chen spin_unlock(&inode->i_lock); 257023be7468SMel Gorman } else { 25716bda666aSChristoph Lameter lock_page(page); 25720fe6e20bSNaoya Horiguchi if (unlikely(anon_vma_prepare(vma))) { 25730fe6e20bSNaoya Horiguchi ret = VM_FAULT_OOM; 25740fe6e20bSNaoya Horiguchi goto backout_unlocked; 257523be7468SMel Gorman } 2576409eb8c2SHillf Danton anon_rmap = 1; 25770fe6e20bSNaoya Horiguchi } 25780fe6e20bSNaoya Horiguchi } else { 257957303d80SAndy Whitcroft /* 2580998b4382SNaoya Horiguchi * If a memory error occurs between mmap() and fault, some processes 2581998b4382SNaoya Horiguchi * don't have a hwpoisoned swap entry for the errored virtual address. 2582998b4382SNaoya Horiguchi * So we need to block the hugepage fault with a PG_hwpoison bit check. 2583fd6a03edSNaoya Horiguchi */ 2584fd6a03edSNaoya Horiguchi if (unlikely(PageHWPoison(page))) { 2585aa50d3a7SAndi Kleen ret = VM_FAULT_HWPOISON | 2586aa50d3a7SAndi Kleen VM_FAULT_SET_HINDEX(h - hstates); 2587fd6a03edSNaoya Horiguchi goto backout_unlocked; 25886bda666aSChristoph Lameter } 2589998b4382SNaoya Horiguchi } 25901e8f889bSDavid Gibson 259157303d80SAndy Whitcroft /* 259257303d80SAndy Whitcroft * If we are going to COW a private mapping later, we examine the 259357303d80SAndy Whitcroft * pending reservations for this page now. This will ensure that 259457303d80SAndy Whitcroft * any allocations necessary to record that reservation occur outside 259557303d80SAndy Whitcroft * the spinlock.
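 *
 * That is (simplified; vma_needs_reservation() may allocate and
 * hence may sleep):
 *
 *	if (vma_needs_reservation(h, vma, address) < 0) {
 *		ret = VM_FAULT_OOM;
 *		goto backout_unlocked;	/* before taking the spinlock */
 *	}
 *	spin_lock(&mm->page_table_lock);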
259657303d80SAndy Whitcroft */ 2597788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) 25982b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 25992b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 26002b26736cSAndy Whitcroft goto backout_unlocked; 26012b26736cSAndy Whitcroft } 260257303d80SAndy Whitcroft 2603ac9b9c66SHugh Dickins spin_lock(&mm->page_table_lock); 2604a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 26054c887265SAdam Litke if (idx >= size) 26064c887265SAdam Litke goto backout; 26074c887265SAdam Litke 260883c54070SNick Piggin ret = 0; 26097f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) 26104c887265SAdam Litke goto backout; 26114c887265SAdam Litke 2612409eb8c2SHillf Danton if (anon_rmap) 2613409eb8c2SHillf Danton hugepage_add_new_anon_rmap(page, vma, address); 2614409eb8c2SHillf Danton else 2615409eb8c2SHillf Danton page_dup_rmap(page); 26161e8f889bSDavid Gibson new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 26171e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED))); 26181e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte); 26191e8f889bSDavid Gibson 2620788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 26211e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */ 262204f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 26231e8f889bSDavid Gibson } 26241e8f889bSDavid Gibson 2625ac9b9c66SHugh Dickins spin_unlock(&mm->page_table_lock); 26264c887265SAdam Litke unlock_page(page); 26274c887265SAdam Litke out: 2628ac9b9c66SHugh Dickins return ret; 26294c887265SAdam Litke 26304c887265SAdam Litke backout: 26314c887265SAdam Litke spin_unlock(&mm->page_table_lock); 26322b26736cSAndy Whitcroft backout_unlocked: 26334c887265SAdam Litke unlock_page(page); 26344c887265SAdam Litke put_page(page); 26354c887265SAdam Litke goto out; 2636ac9b9c66SHugh Dickins } 2637ac9b9c66SHugh Dickins 263886e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2639788c7df4SHugh Dickins unsigned long address, unsigned int flags) 264086e5216fSAdam Litke { 264186e5216fSAdam Litke pte_t *ptep; 264286e5216fSAdam Litke pte_t entry; 26431e8f889bSDavid Gibson int ret; 26440fe6e20bSNaoya Horiguchi struct page *page = NULL; 264557303d80SAndy Whitcroft struct page *pagecache_page = NULL; 26463935baa9SDavid Gibson static DEFINE_MUTEX(hugetlb_instantiation_mutex); 2647a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 264886e5216fSAdam Litke 26491e16a539SKAMEZAWA Hiroyuki address &= huge_page_mask(h); 26501e16a539SKAMEZAWA Hiroyuki 2651fd6a03edSNaoya Horiguchi ptep = huge_pte_offset(mm, address); 2652fd6a03edSNaoya Horiguchi if (ptep) { 2653fd6a03edSNaoya Horiguchi entry = huge_ptep_get(ptep); 2654290408d4SNaoya Horiguchi if (unlikely(is_hugetlb_entry_migration(entry))) { 2655290408d4SNaoya Horiguchi migration_entry_wait(mm, (pmd_t *)ptep, address); 2656290408d4SNaoya Horiguchi return 0; 2657290408d4SNaoya Horiguchi } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 2658aa50d3a7SAndi Kleen return VM_FAULT_HWPOISON_LARGE | 2659aa50d3a7SAndi Kleen VM_FAULT_SET_HINDEX(h - hstates); 2660fd6a03edSNaoya Horiguchi } 2661fd6a03edSNaoya Horiguchi 2662a5516438SAndi Kleen ptep = huge_pte_alloc(mm, address, huge_page_size(h)); 266386e5216fSAdam Litke if (!ptep) 266486e5216fSAdam Litke return VM_FAULT_OOM; 266586e5216fSAdam Litke 26663935baa9SDavid Gibson /* 26673935baa9SDavid Gibson * 
Serialize hugepage allocation and instantiation, so that we don't 26683935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate 26693935baa9SDavid Gibson * the same page in the page cache. 26703935baa9SDavid Gibson */ 26713935baa9SDavid Gibson mutex_lock(&hugetlb_instantiation_mutex); 26727f2e9525SGerald Schaefer entry = huge_ptep_get(ptep); 26737f2e9525SGerald Schaefer if (huge_pte_none(entry)) { 2674788c7df4SHugh Dickins ret = hugetlb_no_page(mm, vma, address, ptep, flags); 2675b4d1d99fSDavid Gibson goto out_mutex; 26763935baa9SDavid Gibson } 267786e5216fSAdam Litke 267883c54070SNick Piggin ret = 0; 26791e8f889bSDavid Gibson 268057303d80SAndy Whitcroft /* 268157303d80SAndy Whitcroft * If we are going to COW the mapping later, we examine the pending 268257303d80SAndy Whitcroft * reservations for this page now. This will ensure that any 268357303d80SAndy Whitcroft * allocations necessary to record that reservation occur outside the 268457303d80SAndy Whitcroft * spinlock. For private mappings, we also look up the pagecache 268557303d80SAndy Whitcroft * page now as it is used to determine if a reservation has been 268657303d80SAndy Whitcroft * consumed. 268757303d80SAndy Whitcroft */ 2688788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) { 26892b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 26902b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 2691b4d1d99fSDavid Gibson goto out_mutex; 26922b26736cSAndy Whitcroft } 269357303d80SAndy Whitcroft 2694f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE)) 269557303d80SAndy Whitcroft pagecache_page = hugetlbfs_pagecache_page(h, 269657303d80SAndy Whitcroft vma, address); 269757303d80SAndy Whitcroft } 269857303d80SAndy Whitcroft 269956c9cfb1SNaoya Horiguchi /* 270056c9cfb1SNaoya Horiguchi * hugetlb_cow() requires page locks of pte_page(entry) and 270156c9cfb1SNaoya Horiguchi * pagecache_page, so here we need to take the former one 270256c9cfb1SNaoya Horiguchi * when page != pagecache_page or !pagecache_page. 270356c9cfb1SNaoya Horiguchi * Note that locking order is always pagecache_page -> page, 270456c9cfb1SNaoya Horiguchi * so no worry about deadlock.
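 *
 * In the convention used for lock ordering elsewhere in this file:
 *
 *	lock_page(pagecache_page);	/* taken first, if it exists */
 *	lock_page(page);		/* only if page != pagecache_page */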
270556c9cfb1SNaoya Horiguchi */ 27060fe6e20bSNaoya Horiguchi page = pte_page(entry); 270756c9cfb1SNaoya Horiguchi if (page != pagecache_page) 27080fe6e20bSNaoya Horiguchi lock_page(page); 27090fe6e20bSNaoya Horiguchi 27101e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 27111e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */ 2712b4d1d99fSDavid Gibson if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 2713b4d1d99fSDavid Gibson goto out_page_table_lock; 2714b4d1d99fSDavid Gibson 2715b4d1d99fSDavid Gibson 2716788c7df4SHugh Dickins if (flags & FAULT_FLAG_WRITE) { 2717b4d1d99fSDavid Gibson if (!pte_write(entry)) { 271857303d80SAndy Whitcroft ret = hugetlb_cow(mm, vma, address, ptep, entry, 271957303d80SAndy Whitcroft pagecache_page); 2720b4d1d99fSDavid Gibson goto out_page_table_lock; 2721b4d1d99fSDavid Gibson } 2722b4d1d99fSDavid Gibson entry = pte_mkdirty(entry); 2723b4d1d99fSDavid Gibson } 2724b4d1d99fSDavid Gibson entry = pte_mkyoung(entry); 2725788c7df4SHugh Dickins if (huge_ptep_set_access_flags(vma, address, ptep, entry, 2726788c7df4SHugh Dickins flags & FAULT_FLAG_WRITE)) 27274b3073e1SRussell King update_mmu_cache(vma, address, ptep); 2728b4d1d99fSDavid Gibson 2729b4d1d99fSDavid Gibson out_page_table_lock: 27301e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 273157303d80SAndy Whitcroft 273257303d80SAndy Whitcroft if (pagecache_page) { 273357303d80SAndy Whitcroft unlock_page(pagecache_page); 273457303d80SAndy Whitcroft put_page(pagecache_page); 273557303d80SAndy Whitcroft } 27361f64d69cSDean Nelson if (page != pagecache_page) 273756c9cfb1SNaoya Horiguchi unlock_page(page); 273857303d80SAndy Whitcroft 2739b4d1d99fSDavid Gibson out_mutex: 27403935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 27411e8f889bSDavid Gibson 27421e8f889bSDavid Gibson return ret; 274386e5216fSAdam Litke } 274486e5216fSAdam Litke 2745ceb86879SAndi Kleen /* Can be overridden by architectures */ 2746ceb86879SAndi Kleen __attribute__((weak)) struct page * 2747ceb86879SAndi Kleen follow_huge_pud(struct mm_struct *mm, unsigned long address, 2748ceb86879SAndi Kleen pud_t *pud, int write) 2749ceb86879SAndi Kleen { 2750ceb86879SAndi Kleen BUG(); 2751ceb86879SAndi Kleen return NULL; 2752ceb86879SAndi Kleen } 2753ceb86879SAndi Kleen 275463551ae0SDavid Gibson int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 275563551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas, 27565b23dbe8SAdam Litke unsigned long *position, int *length, int i, 27572a15efc9SHugh Dickins unsigned int flags) 275863551ae0SDavid Gibson { 2759d5d4b0aaSChen, Kenneth W unsigned long pfn_offset; 2760d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position; 276163551ae0SDavid Gibson int remainder = *length; 2762a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 276363551ae0SDavid Gibson 27641c59827dSHugh Dickins spin_lock(&mm->page_table_lock); 276563551ae0SDavid Gibson while (vaddr < vma->vm_end && remainder) { 276663551ae0SDavid Gibson pte_t *pte; 27672a15efc9SHugh Dickins int absent; 276863551ae0SDavid Gibson struct page *page; 276963551ae0SDavid Gibson 27704c887265SAdam Litke /* 27714c887265SAdam Litke * Some archs (sparc64, sh*) have multiple pte_ts for 27722a15efc9SHugh Dickins * each hugepage. We have to make sure we get the 27734c887265SAdam Litke * first, for the page indexing below to work.
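 *
 * Hence the masking in the lookup below:
 *
 *	pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
 *	pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;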
27744c887265SAdam Litke */ 2775a5516438SAndi Kleen pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); 27762a15efc9SHugh Dickins absent = !pte || huge_pte_none(huge_ptep_get(pte)); 277763551ae0SDavid Gibson 27782a15efc9SHugh Dickins /* 27792a15efc9SHugh Dickins * When coredumping, it suits get_dump_page if we just return 27803ae77f43SHugh Dickins * an error where there's an empty slot with no huge pagecache 27813ae77f43SHugh Dickins * to back it. This way, we avoid allocating a hugepage, and 27823ae77f43SHugh Dickins * the sparse dumpfile avoids allocating disk blocks, but its 27833ae77f43SHugh Dickins * huge holes still show up with zeroes where they need to be. 27842a15efc9SHugh Dickins */ 27853ae77f43SHugh Dickins if (absent && (flags & FOLL_DUMP) && 27863ae77f43SHugh Dickins !hugetlbfs_pagecache_present(h, vma, vaddr)) { 27872a15efc9SHugh Dickins remainder = 0; 27882a15efc9SHugh Dickins break; 27892a15efc9SHugh Dickins } 27902a15efc9SHugh Dickins 27912a15efc9SHugh Dickins if (absent || 27922a15efc9SHugh Dickins ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { 27934c887265SAdam Litke int ret; 27944c887265SAdam Litke 27954c887265SAdam Litke spin_unlock(&mm->page_table_lock); 27962a15efc9SHugh Dickins ret = hugetlb_fault(mm, vma, vaddr, 27972a15efc9SHugh Dickins (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0); 27984c887265SAdam Litke spin_lock(&mm->page_table_lock); 2799a89182c7SAdam Litke if (!(ret & VM_FAULT_ERROR)) 28004c887265SAdam Litke continue; 28014c887265SAdam Litke 28021c59827dSHugh Dickins remainder = 0; 28031c59827dSHugh Dickins break; 28041c59827dSHugh Dickins } 280563551ae0SDavid Gibson 2806a5516438SAndi Kleen pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 28077f2e9525SGerald Schaefer page = pte_page(huge_ptep_get(pte)); 2808d5d4b0aaSChen, Kenneth W same_page: 2809d6692183SChen, Kenneth W if (pages) { 281069d177c2SAndy Whitcroft pages[i] = mem_map_offset(page, pfn_offset); 28114b2e38adSKOSAKI Motohiro get_page(pages[i]); 2812d6692183SChen, Kenneth W } 281363551ae0SDavid Gibson 281463551ae0SDavid Gibson if (vmas) 281563551ae0SDavid Gibson vmas[i] = vma; 281663551ae0SDavid Gibson 281763551ae0SDavid Gibson vaddr += PAGE_SIZE; 2818d5d4b0aaSChen, Kenneth W ++pfn_offset; 281963551ae0SDavid Gibson --remainder; 282063551ae0SDavid Gibson ++i; 2821d5d4b0aaSChen, Kenneth W if (vaddr < vma->vm_end && remainder && 2822a5516438SAndi Kleen pfn_offset < pages_per_huge_page(h)) { 2823d5d4b0aaSChen, Kenneth W /* 2824d5d4b0aaSChen, Kenneth W * We use pfn_offset to avoid touching the pageframes 2825d5d4b0aaSChen, Kenneth W * of this compound page. 2826d5d4b0aaSChen, Kenneth W */ 2827d5d4b0aaSChen, Kenneth W goto same_page; 2828d5d4b0aaSChen, Kenneth W } 282963551ae0SDavid Gibson } 28301c59827dSHugh Dickins spin_unlock(&mm->page_table_lock); 283163551ae0SDavid Gibson *length = remainder; 283263551ae0SDavid Gibson *position = vaddr; 283363551ae0SDavid Gibson 28342a15efc9SHugh Dickins return i ? 
i : -EFAULT; 283563551ae0SDavid Gibson } 28368f860591SZhang, Yanmin 28378f860591SZhang, Yanmin void hugetlb_change_protection(struct vm_area_struct *vma, 28388f860591SZhang, Yanmin unsigned long address, unsigned long end, pgprot_t newprot) 28398f860591SZhang, Yanmin { 28408f860591SZhang, Yanmin struct mm_struct *mm = vma->vm_mm; 28418f860591SZhang, Yanmin unsigned long start = address; 28428f860591SZhang, Yanmin pte_t *ptep; 28438f860591SZhang, Yanmin pte_t pte; 2844a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 28458f860591SZhang, Yanmin 28468f860591SZhang, Yanmin BUG_ON(address >= end); 28478f860591SZhang, Yanmin flush_cache_range(vma, address, end); 28488f860591SZhang, Yanmin 28493d48ae45SPeter Zijlstra mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); 28508f860591SZhang, Yanmin spin_lock(&mm->page_table_lock); 2851a5516438SAndi Kleen for (; address < end; address += huge_page_size(h)) { 28528f860591SZhang, Yanmin ptep = huge_pte_offset(mm, address); 28538f860591SZhang, Yanmin if (!ptep) 28548f860591SZhang, Yanmin continue; 285539dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 285639dde65cSChen, Kenneth W continue; 28577f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) { 28588f860591SZhang, Yanmin pte = huge_ptep_get_and_clear(mm, address, ptep); 28598f860591SZhang, Yanmin pte = pte_mkhuge(pte_modify(pte, newprot)); 28608f860591SZhang, Yanmin set_huge_pte_at(mm, address, ptep, pte); 28618f860591SZhang, Yanmin } 28628f860591SZhang, Yanmin } 28638f860591SZhang, Yanmin spin_unlock(&mm->page_table_lock); 28643d48ae45SPeter Zijlstra mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); 28658f860591SZhang, Yanmin 28668f860591SZhang, Yanmin flush_tlb_range(vma, start, end); 28678f860591SZhang, Yanmin } 28688f860591SZhang, Yanmin 2869a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode, 2870a1e78772SMel Gorman long from, long to, 28715a6fe125SMel Gorman struct vm_area_struct *vma, 2872ca16d140SKOSAKI Motohiro vm_flags_t vm_flags) 2873e4e574b7SAdam Litke { 287417c9d12eSMel Gorman long ret, chg; 2875a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 2876e4e574b7SAdam Litke 2877a1e78772SMel Gorman /* 287817c9d12eSMel Gorman * Only apply hugepage reservation if asked. At fault time, an 287917c9d12eSMel Gorman * attempt will be made for VM_NORESERVE to allocate a page 288017c9d12eSMel Gorman * and charge filesystem quota without using reserves 288117c9d12eSMel Gorman */ 2882ca16d140SKOSAKI Motohiro if (vm_flags & VM_NORESERVE) 288317c9d12eSMel Gorman return 0; 288417c9d12eSMel Gorman 288517c9d12eSMel Gorman /* 2886a1e78772SMel Gorman * Shared mappings base their reservation on the number of pages that 2887a1e78772SMel Gorman * are already allocated on behalf of the file. Private mappings need 2888a1e78772SMel Gorman * to reserve the full area even if read-only as mprotect() may be 2889a1e78772SMel Gorman * called to make the mapping read-write. Assume !vma is a shm mapping
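 *
 * In short (a condensed view of the code below):
 *
 *	shared (or !vma): chg = region_chg(&inode->i_mapping->private_list,
 *					   from, to);
 *	private:          chg = to - from;	/* plus a fresh resv_map */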
Assume !vma is a shm mapping 2890a1e78772SMel Gorman */ 2891f83a275dSMel Gorman if (!vma || vma->vm_flags & VM_MAYSHARE) 2892e4e574b7SAdam Litke chg = region_chg(&inode->i_mapping->private_list, from, to); 28935a6fe125SMel Gorman else { 28945a6fe125SMel Gorman struct resv_map *resv_map = resv_map_alloc(); 28955a6fe125SMel Gorman if (!resv_map) 28965a6fe125SMel Gorman return -ENOMEM; 28975a6fe125SMel Gorman 289817c9d12eSMel Gorman chg = to - from; 289917c9d12eSMel Gorman 29005a6fe125SMel Gorman set_vma_resv_map(vma, resv_map); 29015a6fe125SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 29025a6fe125SMel Gorman } 29035a6fe125SMel Gorman 290417c9d12eSMel Gorman if (chg < 0) 290517c9d12eSMel Gorman return chg; 290617c9d12eSMel Gorman 290717c9d12eSMel Gorman /* There must be enough filesystem quota for the mapping */ 290817c9d12eSMel Gorman if (hugetlb_get_quota(inode->i_mapping, chg)) 290917c9d12eSMel Gorman return -ENOSPC; 291017c9d12eSMel Gorman 291117c9d12eSMel Gorman /* 291217c9d12eSMel Gorman * Check enough hugepages are available for the reservation. 291317c9d12eSMel Gorman * Hand back the quota if there are not 291417c9d12eSMel Gorman */ 291517c9d12eSMel Gorman ret = hugetlb_acct_memory(h, chg); 291617c9d12eSMel Gorman if (ret < 0) { 291717c9d12eSMel Gorman hugetlb_put_quota(inode->i_mapping, chg); 291817c9d12eSMel Gorman return ret; 291917c9d12eSMel Gorman } 292017c9d12eSMel Gorman 292117c9d12eSMel Gorman /* 292217c9d12eSMel Gorman * Account for the reservations made. Shared mappings record regions 292317c9d12eSMel Gorman * that have reservations as they are shared by multiple VMAs. 292417c9d12eSMel Gorman * When the last VMA disappears, the region map says how much 292517c9d12eSMel Gorman * the reservation was and the page cache tells how much of 292617c9d12eSMel Gorman * the reservation was consumed. Private mappings are per-VMA and 292717c9d12eSMel Gorman * only the consumed reservations are tracked. When the VMA 292817c9d12eSMel Gorman * disappears, the original reservation is the VMA size and the 292917c9d12eSMel Gorman * consumed reservations are stored in the map. 
Hence, nothing 293017c9d12eSMel Gorman * else has to be done for private mappings here 293117c9d12eSMel Gorman */ 2932f83a275dSMel Gorman if (!vma || vma->vm_flags & VM_MAYSHARE) 293317c9d12eSMel Gorman region_add(&inode->i_mapping->private_list, from, to); 2934a43a8c39SChen, Kenneth W return 0; 2935a43a8c39SChen, Kenneth W } 2936a43a8c39SChen, Kenneth W 2937a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 2938a43a8c39SChen, Kenneth W { 2939a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 2940a43a8c39SChen, Kenneth W long chg = region_truncate(&inode->i_mapping->private_list, offset); 294145c682a6SKen Chen 294245c682a6SKen Chen spin_lock(&inode->i_lock); 2943e4c6f8beSEric Sandeen inode->i_blocks -= (blocks_per_huge_page(h) * freed); 294445c682a6SKen Chen spin_unlock(&inode->i_lock); 294545c682a6SKen Chen 294690d8b7e6SAdam Litke hugetlb_put_quota(inode->i_mapping, (chg - freed)); 2947a5516438SAndi Kleen hugetlb_acct_memory(h, -(chg - freed)); 2948a43a8c39SChen, Kenneth W } 294993f70f90SNaoya Horiguchi 2950d5bd9106SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE 2951d5bd9106SAndi Kleen 29526de2b1aaSNaoya Horiguchi /* Should be called in hugetlb_lock */ 29536de2b1aaSNaoya Horiguchi static int is_hugepage_on_freelist(struct page *hpage) 29546de2b1aaSNaoya Horiguchi { 29556de2b1aaSNaoya Horiguchi struct page *page; 29566de2b1aaSNaoya Horiguchi struct page *tmp; 29576de2b1aaSNaoya Horiguchi struct hstate *h = page_hstate(hpage); 29586de2b1aaSNaoya Horiguchi int nid = page_to_nid(hpage); 29596de2b1aaSNaoya Horiguchi 29606de2b1aaSNaoya Horiguchi list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru) 29616de2b1aaSNaoya Horiguchi if (page == hpage) 29626de2b1aaSNaoya Horiguchi return 1; 29636de2b1aaSNaoya Horiguchi return 0; 29646de2b1aaSNaoya Horiguchi } 29656de2b1aaSNaoya Horiguchi 296693f70f90SNaoya Horiguchi /* 296793f70f90SNaoya Horiguchi * This function is called from memory failure code. 296893f70f90SNaoya Horiguchi * Assume the caller holds page lock of the head page. 296993f70f90SNaoya Horiguchi */ 29706de2b1aaSNaoya Horiguchi int dequeue_hwpoisoned_huge_page(struct page *hpage) 297193f70f90SNaoya Horiguchi { 297293f70f90SNaoya Horiguchi struct hstate *h = page_hstate(hpage); 297393f70f90SNaoya Horiguchi int nid = page_to_nid(hpage); 29746de2b1aaSNaoya Horiguchi int ret = -EBUSY; 297593f70f90SNaoya Horiguchi 297693f70f90SNaoya Horiguchi spin_lock(&hugetlb_lock); 29776de2b1aaSNaoya Horiguchi if (is_hugepage_on_freelist(hpage)) { 297893f70f90SNaoya Horiguchi list_del(&hpage->lru); 29798c6c2ecbSNaoya Horiguchi set_page_refcounted(hpage); 298093f70f90SNaoya Horiguchi h->free_huge_pages--; 298193f70f90SNaoya Horiguchi h->free_huge_pages_node[nid]--; 29826de2b1aaSNaoya Horiguchi ret = 0; 298393f70f90SNaoya Horiguchi } 29846de2b1aaSNaoya Horiguchi spin_unlock(&hugetlb_lock); 29856de2b1aaSNaoya Horiguchi return ret; 29866de2b1aaSNaoya Horiguchi } 29876de2b1aaSNaoya Horiguchi #endif 2988