11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * Generic hugetlb support. 31da177e4SLinus Torvalds * (C) William Irwin, April 2004 41da177e4SLinus Torvalds */ 51da177e4SLinus Torvalds #include <linux/gfp.h> 61da177e4SLinus Torvalds #include <linux/list.h> 71da177e4SLinus Torvalds #include <linux/init.h> 81da177e4SLinus Torvalds #include <linux/module.h> 91da177e4SLinus Torvalds #include <linux/mm.h> 10e1759c21SAlexey Dobriyan #include <linux/seq_file.h> 111da177e4SLinus Torvalds #include <linux/sysctl.h> 121da177e4SLinus Torvalds #include <linux/highmem.h> 13cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h> 141da177e4SLinus Torvalds #include <linux/nodemask.h> 1563551ae0SDavid Gibson #include <linux/pagemap.h> 165da7ca86SChristoph Lameter #include <linux/mempolicy.h> 17aea47ff3SChristoph Lameter #include <linux/cpuset.h> 183935baa9SDavid Gibson #include <linux/mutex.h> 19aa888a74SAndi Kleen #include <linux/bootmem.h> 20a3437870SNishanth Aravamudan #include <linux/sysfs.h> 21d6606683SLinus Torvalds 2263551ae0SDavid Gibson #include <asm/page.h> 2363551ae0SDavid Gibson #include <asm/pgtable.h> 2478a34ae2SAdrian Bunk #include <asm/io.h> 2563551ae0SDavid Gibson 2663551ae0SDavid Gibson #include <linux/hugetlb.h> 277835e98bSNick Piggin #include "internal.h" 281da177e4SLinus Torvalds 291da177e4SLinus Torvalds const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; 30396faf03SMel Gorman static gfp_t htlb_alloc_mask = GFP_HIGHUSER; 31396faf03SMel Gorman unsigned long hugepages_treat_as_movable; 32a5516438SAndi Kleen 33e5ff2159SAndi Kleen static int max_hstate; 34e5ff2159SAndi Kleen unsigned int default_hstate_idx; 35e5ff2159SAndi Kleen struct hstate hstates[HUGE_MAX_HSTATE]; 36e5ff2159SAndi Kleen 3753ba51d2SJon Tollefson __initdata LIST_HEAD(huge_boot_pages); 3853ba51d2SJon Tollefson 39e5ff2159SAndi Kleen /* for command line parsing */ 40e5ff2159SAndi Kleen static struct hstate * __initdata parsed_hstate; 41e5ff2159SAndi Kleen static unsigned long __initdata default_hstate_max_huge_pages; 42e11bfbfcSNick Piggin static unsigned long __initdata default_hstate_size; 43e5ff2159SAndi Kleen 44e5ff2159SAndi Kleen #define for_each_hstate(h) \ 45e5ff2159SAndi Kleen for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++) 46396faf03SMel Gorman 473935baa9SDavid Gibson /* 483935baa9SDavid Gibson * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages 493935baa9SDavid Gibson */ 503935baa9SDavid Gibson static DEFINE_SPINLOCK(hugetlb_lock); 510bd0f9fbSEric Paris 52e7c4b0bfSAndy Whitcroft /* 5396822904SAndy Whitcroft * Region tracking -- allows tracking of reservations and instantiated pages 5496822904SAndy Whitcroft * across the pages in a mapping. 5584afd99bSAndy Whitcroft * 5684afd99bSAndy Whitcroft * The region data structures are protected by a combination of the mmap_sem 5784afd99bSAndy Whitcroft * and the hugetlb_instantiation_mutex.
To access or modify a region the caller 5884afd99bSAndy Whitcroft * must either hold the mmap_sem for write, or the mmap_sem for read and 5984afd99bSAndy Whitcroft * the hugetlb_instantiation mutex: 6084afd99bSAndy Whitcroft * 6184afd99bSAndy Whitcroft * down_write(&mm->mmap_sem); 6284afd99bSAndy Whitcroft * or 6384afd99bSAndy Whitcroft * down_read(&mm->mmap_sem); 6484afd99bSAndy Whitcroft * mutex_lock(&hugetlb_instantiation_mutex); 6596822904SAndy Whitcroft */ 6696822904SAndy Whitcroft struct file_region { 6796822904SAndy Whitcroft struct list_head link; 6896822904SAndy Whitcroft long from; 6996822904SAndy Whitcroft long to; 7096822904SAndy Whitcroft }; 7196822904SAndy Whitcroft 7296822904SAndy Whitcroft static long region_add(struct list_head *head, long f, long t) 7396822904SAndy Whitcroft { 7496822904SAndy Whitcroft struct file_region *rg, *nrg, *trg; 7596822904SAndy Whitcroft 7696822904SAndy Whitcroft /* Locate the region we are either in or before. */ 7796822904SAndy Whitcroft list_for_each_entry(rg, head, link) 7896822904SAndy Whitcroft if (f <= rg->to) 7996822904SAndy Whitcroft break; 8096822904SAndy Whitcroft 8196822904SAndy Whitcroft /* Round our left edge to the current segment if it encloses us. */ 8296822904SAndy Whitcroft if (f > rg->from) 8396822904SAndy Whitcroft f = rg->from; 8496822904SAndy Whitcroft 8596822904SAndy Whitcroft /* Check for and consume any regions we now overlap with. */ 8696822904SAndy Whitcroft nrg = rg; 8796822904SAndy Whitcroft list_for_each_entry_safe(rg, trg, rg->link.prev, link) { 8896822904SAndy Whitcroft if (&rg->link == head) 8996822904SAndy Whitcroft break; 9096822904SAndy Whitcroft if (rg->from > t) 9196822904SAndy Whitcroft break; 9296822904SAndy Whitcroft 9396822904SAndy Whitcroft /* If this area reaches higher then extend our area to 9496822904SAndy Whitcroft * include it completely. If this is not the first area 9596822904SAndy Whitcroft * which we intend to reuse, free it. */ 9696822904SAndy Whitcroft if (rg->to > t) 9796822904SAndy Whitcroft t = rg->to; 9896822904SAndy Whitcroft if (rg != nrg) { 9996822904SAndy Whitcroft list_del(&rg->link); 10096822904SAndy Whitcroft kfree(rg); 10196822904SAndy Whitcroft } 10296822904SAndy Whitcroft } 10396822904SAndy Whitcroft nrg->from = f; 10496822904SAndy Whitcroft nrg->to = t; 10596822904SAndy Whitcroft return 0; 10696822904SAndy Whitcroft } 10796822904SAndy Whitcroft 10896822904SAndy Whitcroft static long region_chg(struct list_head *head, long f, long t) 10996822904SAndy Whitcroft { 11096822904SAndy Whitcroft struct file_region *rg, *nrg; 11196822904SAndy Whitcroft long chg = 0; 11296822904SAndy Whitcroft 11396822904SAndy Whitcroft /* Locate the region we are before or in. */ 11496822904SAndy Whitcroft list_for_each_entry(rg, head, link) 11596822904SAndy Whitcroft if (f <= rg->to) 11696822904SAndy Whitcroft break; 11796822904SAndy Whitcroft 11896822904SAndy Whitcroft /* If we are below the current region then a new region is required. 11996822904SAndy Whitcroft * Subtle, allocate a new region at the position but make it zero 12096822904SAndy Whitcroft * size such that we can guarantee to record the reservation. 
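 * For example, a call asking to reserve [2, 4) when the first existing
 * region is [6, 9) inserts a zero-size placeholder [2, 2) and returns a
 * change of 2; a later region_add() for the same range then grows that
 * placeholder to [2, 4).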
*/ 12196822904SAndy Whitcroft if (&rg->link == head || t < rg->from) { 12296822904SAndy Whitcroft nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); 12396822904SAndy Whitcroft if (!nrg) 12496822904SAndy Whitcroft return -ENOMEM; 12596822904SAndy Whitcroft nrg->from = f; 12696822904SAndy Whitcroft nrg->to = f; 12796822904SAndy Whitcroft INIT_LIST_HEAD(&nrg->link); 12896822904SAndy Whitcroft list_add(&nrg->link, rg->link.prev); 12996822904SAndy Whitcroft 13096822904SAndy Whitcroft return t - f; 13196822904SAndy Whitcroft } 13296822904SAndy Whitcroft 13396822904SAndy Whitcroft /* Round our left edge to the current segment if it encloses us. */ 13496822904SAndy Whitcroft if (f > rg->from) 13596822904SAndy Whitcroft f = rg->from; 13696822904SAndy Whitcroft chg = t - f; 13796822904SAndy Whitcroft 13896822904SAndy Whitcroft /* Check for and consume any regions we now overlap with. */ 13996822904SAndy Whitcroft list_for_each_entry(rg, rg->link.prev, link) { 14096822904SAndy Whitcroft if (&rg->link == head) 14196822904SAndy Whitcroft break; 14296822904SAndy Whitcroft if (rg->from > t) 14396822904SAndy Whitcroft return chg; 14496822904SAndy Whitcroft 14596822904SAndy Whitcroft /* We overlap with this area, if it extends futher than 14696822904SAndy Whitcroft * us then we must extend ourselves. Account for its 14796822904SAndy Whitcroft * existing reservation. */ 14896822904SAndy Whitcroft if (rg->to > t) { 14996822904SAndy Whitcroft chg += rg->to - t; 15096822904SAndy Whitcroft t = rg->to; 15196822904SAndy Whitcroft } 15296822904SAndy Whitcroft chg -= rg->to - rg->from; 15396822904SAndy Whitcroft } 15496822904SAndy Whitcroft return chg; 15596822904SAndy Whitcroft } 15696822904SAndy Whitcroft 15796822904SAndy Whitcroft static long region_truncate(struct list_head *head, long end) 15896822904SAndy Whitcroft { 15996822904SAndy Whitcroft struct file_region *rg, *trg; 16096822904SAndy Whitcroft long chg = 0; 16196822904SAndy Whitcroft 16296822904SAndy Whitcroft /* Locate the region we are either in or before. */ 16396822904SAndy Whitcroft list_for_each_entry(rg, head, link) 16496822904SAndy Whitcroft if (end <= rg->to) 16596822904SAndy Whitcroft break; 16696822904SAndy Whitcroft if (&rg->link == head) 16796822904SAndy Whitcroft return 0; 16896822904SAndy Whitcroft 16996822904SAndy Whitcroft /* If we are in the middle of a region then adjust it. */ 17096822904SAndy Whitcroft if (end > rg->from) { 17196822904SAndy Whitcroft chg = rg->to - end; 17296822904SAndy Whitcroft rg->to = end; 17396822904SAndy Whitcroft rg = list_entry(rg->link.next, typeof(*rg), link); 17496822904SAndy Whitcroft } 17596822904SAndy Whitcroft 17696822904SAndy Whitcroft /* Drop any remaining regions. */ 17796822904SAndy Whitcroft list_for_each_entry_safe(rg, trg, rg->link.prev, link) { 17896822904SAndy Whitcroft if (&rg->link == head) 17996822904SAndy Whitcroft break; 18096822904SAndy Whitcroft chg += rg->to - rg->from; 18196822904SAndy Whitcroft list_del(&rg->link); 18296822904SAndy Whitcroft kfree(rg); 18396822904SAndy Whitcroft } 18496822904SAndy Whitcroft return chg; 18596822904SAndy Whitcroft } 18696822904SAndy Whitcroft 18784afd99bSAndy Whitcroft static long region_count(struct list_head *head, long f, long t) 18884afd99bSAndy Whitcroft { 18984afd99bSAndy Whitcroft struct file_region *rg; 19084afd99bSAndy Whitcroft long chg = 0; 19184afd99bSAndy Whitcroft 19284afd99bSAndy Whitcroft /* Locate each segment we overlap with, and count that overlap. 
*/ 19384afd99bSAndy Whitcroft list_for_each_entry(rg, head, link) { 19484afd99bSAndy Whitcroft int seg_from; 19584afd99bSAndy Whitcroft int seg_to; 19684afd99bSAndy Whitcroft 19784afd99bSAndy Whitcroft if (rg->to <= f) 19884afd99bSAndy Whitcroft continue; 19984afd99bSAndy Whitcroft if (rg->from >= t) 20084afd99bSAndy Whitcroft break; 20184afd99bSAndy Whitcroft 20284afd99bSAndy Whitcroft seg_from = max(rg->from, f); 20384afd99bSAndy Whitcroft seg_to = min(rg->to, t); 20484afd99bSAndy Whitcroft 20584afd99bSAndy Whitcroft chg += seg_to - seg_from; 20684afd99bSAndy Whitcroft } 20784afd99bSAndy Whitcroft 20884afd99bSAndy Whitcroft return chg; 20984afd99bSAndy Whitcroft } 21084afd99bSAndy Whitcroft 21196822904SAndy Whitcroft /* 212e7c4b0bfSAndy Whitcroft * Convert the address within this vma to the page offset within 213e7c4b0bfSAndy Whitcroft * the mapping, in pagecache page units; huge pages here. 214e7c4b0bfSAndy Whitcroft */ 215a5516438SAndi Kleen static pgoff_t vma_hugecache_offset(struct hstate *h, 216a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 217e7c4b0bfSAndy Whitcroft { 218a5516438SAndi Kleen return ((address - vma->vm_start) >> huge_page_shift(h)) + 219a5516438SAndi Kleen (vma->vm_pgoff >> huge_page_order(h)); 220e7c4b0bfSAndy Whitcroft } 221e7c4b0bfSAndy Whitcroft 22284afd99bSAndy Whitcroft /* 22308fba699SMel Gorman * Return the size of the pages allocated when backing a VMA. In the majority 22408fba699SMel Gorman * cases this will be same size as used by the page table entries. 22508fba699SMel Gorman */ 22608fba699SMel Gorman unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) 22708fba699SMel Gorman { 22808fba699SMel Gorman struct hstate *hstate; 22908fba699SMel Gorman 23008fba699SMel Gorman if (!is_vm_hugetlb_page(vma)) 23108fba699SMel Gorman return PAGE_SIZE; 23208fba699SMel Gorman 23308fba699SMel Gorman hstate = hstate_vma(vma); 23408fba699SMel Gorman 23508fba699SMel Gorman return 1UL << (hstate->order + PAGE_SHIFT); 23608fba699SMel Gorman } 237f340ca0fSJoerg Roedel EXPORT_SYMBOL_GPL(vma_kernel_pagesize); 23808fba699SMel Gorman 23908fba699SMel Gorman /* 2403340289dSMel Gorman * Return the page size being used by the MMU to back a VMA. In the majority 2413340289dSMel Gorman * of cases, the page size used by the kernel matches the MMU size. On 2423340289dSMel Gorman * architectures where it differs, an architecture-specific version of this 2433340289dSMel Gorman * function is required. 2443340289dSMel Gorman */ 2453340289dSMel Gorman #ifndef vma_mmu_pagesize 2463340289dSMel Gorman unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) 2473340289dSMel Gorman { 2483340289dSMel Gorman return vma_kernel_pagesize(vma); 2493340289dSMel Gorman } 2503340289dSMel Gorman #endif 2513340289dSMel Gorman 2523340289dSMel Gorman /* 25384afd99bSAndy Whitcroft * Flags for MAP_PRIVATE reservations. These are stored in the bottom 25484afd99bSAndy Whitcroft * bits of the reservation map pointer, which are always clear due to 25584afd99bSAndy Whitcroft * alignment. 25684afd99bSAndy Whitcroft */ 25784afd99bSAndy Whitcroft #define HPAGE_RESV_OWNER (1UL << 0) 25884afd99bSAndy Whitcroft #define HPAGE_RESV_UNMAPPED (1UL << 1) 25904f2cbe3SMel Gorman #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED) 26084afd99bSAndy Whitcroft 261a1e78772SMel Gorman /* 262a1e78772SMel Gorman * These helpers are used to track how many pages are reserved for 263a1e78772SMel Gorman * faults in a MAP_PRIVATE mapping. 
Only the process that called mmap() 264a1e78772SMel Gorman * is guaranteed to have their future faults succeed. 265a1e78772SMel Gorman * 266a1e78772SMel Gorman * With the exception of reset_vma_resv_huge_pages() which is called at fork(), 267a1e78772SMel Gorman * the reserve counters are updated with the hugetlb_lock held. It is safe 268a1e78772SMel Gorman * to reset the VMA at fork() time as it is not in use yet and there is no 269a1e78772SMel Gorman * chance of the global counters getting corrupted as a result of the values. 27084afd99bSAndy Whitcroft * 27184afd99bSAndy Whitcroft * The private mapping reservation is represented in a subtly different 27284afd99bSAndy Whitcroft * manner to a shared mapping. A shared mapping has a region map associated 27384afd99bSAndy Whitcroft * with the underlying file, this region map represents the backing file 27484afd99bSAndy Whitcroft * pages which have ever had a reservation assigned which this persists even 27584afd99bSAndy Whitcroft * after the page is instantiated. A private mapping has a region map 27684afd99bSAndy Whitcroft * associated with the original mmap which is attached to all VMAs which 27784afd99bSAndy Whitcroft * reference it, this region map represents those offsets which have consumed 27884afd99bSAndy Whitcroft * reservation ie. where pages have been instantiated. 279a1e78772SMel Gorman */ 280e7c4b0bfSAndy Whitcroft static unsigned long get_vma_private_data(struct vm_area_struct *vma) 281e7c4b0bfSAndy Whitcroft { 282e7c4b0bfSAndy Whitcroft return (unsigned long)vma->vm_private_data; 283e7c4b0bfSAndy Whitcroft } 284e7c4b0bfSAndy Whitcroft 285e7c4b0bfSAndy Whitcroft static void set_vma_private_data(struct vm_area_struct *vma, 286e7c4b0bfSAndy Whitcroft unsigned long value) 287e7c4b0bfSAndy Whitcroft { 288e7c4b0bfSAndy Whitcroft vma->vm_private_data = (void *)value; 289e7c4b0bfSAndy Whitcroft } 290e7c4b0bfSAndy Whitcroft 29184afd99bSAndy Whitcroft struct resv_map { 29284afd99bSAndy Whitcroft struct kref refs; 29384afd99bSAndy Whitcroft struct list_head regions; 29484afd99bSAndy Whitcroft }; 29584afd99bSAndy Whitcroft 2962a4b3dedSHarvey Harrison static struct resv_map *resv_map_alloc(void) 29784afd99bSAndy Whitcroft { 29884afd99bSAndy Whitcroft struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); 29984afd99bSAndy Whitcroft if (!resv_map) 30084afd99bSAndy Whitcroft return NULL; 30184afd99bSAndy Whitcroft 30284afd99bSAndy Whitcroft kref_init(&resv_map->refs); 30384afd99bSAndy Whitcroft INIT_LIST_HEAD(&resv_map->regions); 30484afd99bSAndy Whitcroft 30584afd99bSAndy Whitcroft return resv_map; 30684afd99bSAndy Whitcroft } 30784afd99bSAndy Whitcroft 3082a4b3dedSHarvey Harrison static void resv_map_release(struct kref *ref) 30984afd99bSAndy Whitcroft { 31084afd99bSAndy Whitcroft struct resv_map *resv_map = container_of(ref, struct resv_map, refs); 31184afd99bSAndy Whitcroft 31284afd99bSAndy Whitcroft /* Clear out any active regions before we release the map. 
*/ 31384afd99bSAndy Whitcroft region_truncate(&resv_map->regions, 0); 31484afd99bSAndy Whitcroft kfree(resv_map); 31584afd99bSAndy Whitcroft } 31684afd99bSAndy Whitcroft 31784afd99bSAndy Whitcroft static struct resv_map *vma_resv_map(struct vm_area_struct *vma) 318a1e78772SMel Gorman { 319a1e78772SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 320f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE)) 32184afd99bSAndy Whitcroft return (struct resv_map *)(get_vma_private_data(vma) & 32284afd99bSAndy Whitcroft ~HPAGE_RESV_MASK); 3232a4b3dedSHarvey Harrison return NULL; 324a1e78772SMel Gorman } 325a1e78772SMel Gorman 32684afd99bSAndy Whitcroft static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) 327a1e78772SMel Gorman { 328a1e78772SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 329f83a275dSMel Gorman VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); 330a1e78772SMel Gorman 33184afd99bSAndy Whitcroft set_vma_private_data(vma, (get_vma_private_data(vma) & 33284afd99bSAndy Whitcroft HPAGE_RESV_MASK) | (unsigned long)map); 33304f2cbe3SMel Gorman } 33404f2cbe3SMel Gorman 33504f2cbe3SMel Gorman static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) 33604f2cbe3SMel Gorman { 33704f2cbe3SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 338f83a275dSMel Gorman VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); 339e7c4b0bfSAndy Whitcroft 340e7c4b0bfSAndy Whitcroft set_vma_private_data(vma, get_vma_private_data(vma) | flags); 34104f2cbe3SMel Gorman } 34204f2cbe3SMel Gorman 34304f2cbe3SMel Gorman static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) 34404f2cbe3SMel Gorman { 34504f2cbe3SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 346e7c4b0bfSAndy Whitcroft 347e7c4b0bfSAndy Whitcroft return (get_vma_private_data(vma) & flag) != 0; 348a1e78772SMel Gorman } 349a1e78772SMel Gorman 350a1e78772SMel Gorman /* Decrement the reserved pages in the hugepage pool by one */ 351a5516438SAndi Kleen static void decrement_hugepage_resv_vma(struct hstate *h, 352a5516438SAndi Kleen struct vm_area_struct *vma) 353a1e78772SMel Gorman { 354c37f9fb1SAndy Whitcroft if (vma->vm_flags & VM_NORESERVE) 355c37f9fb1SAndy Whitcroft return; 356c37f9fb1SAndy Whitcroft 357f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) { 358a1e78772SMel Gorman /* Shared mappings always use reserves */ 359a5516438SAndi Kleen h->resv_huge_pages--; 36084afd99bSAndy Whitcroft } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 361a1e78772SMel Gorman /* 362a1e78772SMel Gorman * Only the process that called mmap() has reserves for 363a1e78772SMel Gorman * private mappings. 
364a1e78772SMel Gorman */ 365a5516438SAndi Kleen h->resv_huge_pages--; 366a1e78772SMel Gorman } 367a1e78772SMel Gorman } 368a1e78772SMel Gorman 36904f2cbe3SMel Gorman /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ 370a1e78772SMel Gorman void reset_vma_resv_huge_pages(struct vm_area_struct *vma) 371a1e78772SMel Gorman { 372a1e78772SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 373f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE)) 374a1e78772SMel Gorman vma->vm_private_data = (void *)0; 375a1e78772SMel Gorman } 376a1e78772SMel Gorman 377a1e78772SMel Gorman /* Returns true if the VMA has associated reserve pages */ 3787f09ca51SMel Gorman static int vma_has_reserves(struct vm_area_struct *vma) 379a1e78772SMel Gorman { 380f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) 381a1e78772SMel Gorman return 1; 3827f09ca51SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 3837f09ca51SMel Gorman return 1; 3847f09ca51SMel Gorman return 0; 385a1e78772SMel Gorman } 386a1e78772SMel Gorman 38769d177c2SAndy Whitcroft static void clear_gigantic_page(struct page *page, 38869d177c2SAndy Whitcroft unsigned long addr, unsigned long sz) 38969d177c2SAndy Whitcroft { 39069d177c2SAndy Whitcroft int i; 39169d177c2SAndy Whitcroft struct page *p = page; 39269d177c2SAndy Whitcroft 39369d177c2SAndy Whitcroft might_sleep(); 39469d177c2SAndy Whitcroft for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) { 39569d177c2SAndy Whitcroft cond_resched(); 39669d177c2SAndy Whitcroft clear_user_highpage(p, addr + i * PAGE_SIZE); 39769d177c2SAndy Whitcroft } 39869d177c2SAndy Whitcroft } 399a5516438SAndi Kleen static void clear_huge_page(struct page *page, 400a5516438SAndi Kleen unsigned long addr, unsigned long sz) 40179ac6ba4SDavid Gibson { 40279ac6ba4SDavid Gibson int i; 40379ac6ba4SDavid Gibson 404ebdd4aeaSHannes Eder if (unlikely(sz > MAX_ORDER_NR_PAGES)) { 405ebdd4aeaSHannes Eder clear_gigantic_page(page, addr, sz); 406ebdd4aeaSHannes Eder return; 407ebdd4aeaSHannes Eder } 40869d177c2SAndy Whitcroft 40979ac6ba4SDavid Gibson might_sleep(); 410a5516438SAndi Kleen for (i = 0; i < sz/PAGE_SIZE; i++) { 41179ac6ba4SDavid Gibson cond_resched(); 412281e0e3bSRalf Baechle clear_user_highpage(page + i, addr + i * PAGE_SIZE); 41379ac6ba4SDavid Gibson } 41479ac6ba4SDavid Gibson } 41579ac6ba4SDavid Gibson 41669d177c2SAndy Whitcroft static void copy_gigantic_page(struct page *dst, struct page *src, 41769d177c2SAndy Whitcroft unsigned long addr, struct vm_area_struct *vma) 41869d177c2SAndy Whitcroft { 41969d177c2SAndy Whitcroft int i; 42069d177c2SAndy Whitcroft struct hstate *h = hstate_vma(vma); 42169d177c2SAndy Whitcroft struct page *dst_base = dst; 42269d177c2SAndy Whitcroft struct page *src_base = src; 42369d177c2SAndy Whitcroft might_sleep(); 42469d177c2SAndy Whitcroft for (i = 0; i < pages_per_huge_page(h); ) { 42569d177c2SAndy Whitcroft cond_resched(); 42669d177c2SAndy Whitcroft copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); 42769d177c2SAndy Whitcroft 42869d177c2SAndy Whitcroft i++; 42969d177c2SAndy Whitcroft dst = mem_map_next(dst, dst_base, i); 43069d177c2SAndy Whitcroft src = mem_map_next(src, src_base, i); 43169d177c2SAndy Whitcroft } 43269d177c2SAndy Whitcroft } 43379ac6ba4SDavid Gibson static void copy_huge_page(struct page *dst, struct page *src, 4349de455b2SAtsushi Nemoto unsigned long addr, struct vm_area_struct *vma) 43579ac6ba4SDavid Gibson { 43679ac6ba4SDavid Gibson int i; 437a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 43879ac6ba4SDavid Gibson 439ebdd4aeaSHannes Eder if 
(unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) { 440ebdd4aeaSHannes Eder copy_gigantic_page(dst, src, addr, vma); 441ebdd4aeaSHannes Eder return; 442ebdd4aeaSHannes Eder } 44369d177c2SAndy Whitcroft 44479ac6ba4SDavid Gibson might_sleep(); 445a5516438SAndi Kleen for (i = 0; i < pages_per_huge_page(h); i++) { 44679ac6ba4SDavid Gibson cond_resched(); 4479de455b2SAtsushi Nemoto copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); 44879ac6ba4SDavid Gibson } 44979ac6ba4SDavid Gibson } 45079ac6ba4SDavid Gibson 451a5516438SAndi Kleen static void enqueue_huge_page(struct hstate *h, struct page *page) 4521da177e4SLinus Torvalds { 4531da177e4SLinus Torvalds int nid = page_to_nid(page); 454a5516438SAndi Kleen list_add(&page->lru, &h->hugepage_freelists[nid]); 455a5516438SAndi Kleen h->free_huge_pages++; 456a5516438SAndi Kleen h->free_huge_pages_node[nid]++; 4571da177e4SLinus Torvalds } 4581da177e4SLinus Torvalds 459a5516438SAndi Kleen static struct page *dequeue_huge_page_vma(struct hstate *h, 460a5516438SAndi Kleen struct vm_area_struct *vma, 46104f2cbe3SMel Gorman unsigned long address, int avoid_reserve) 4621da177e4SLinus Torvalds { 46331a5c6e4SNishanth Aravamudan int nid; 4641da177e4SLinus Torvalds struct page *page = NULL; 465480eccf9SLee Schermerhorn struct mempolicy *mpol; 46619770b32SMel Gorman nodemask_t *nodemask; 467396faf03SMel Gorman struct zonelist *zonelist = huge_zonelist(vma, address, 46819770b32SMel Gorman htlb_alloc_mask, &mpol, &nodemask); 469dd1a239fSMel Gorman struct zone *zone; 470dd1a239fSMel Gorman struct zoneref *z; 4711da177e4SLinus Torvalds 472a1e78772SMel Gorman /* 473a1e78772SMel Gorman * A child process with MAP_PRIVATE mappings created by their parent 474a1e78772SMel Gorman * have no page reserves. This check ensures that reservations are 475a1e78772SMel Gorman * not "stolen". 
The child may still get SIGKILLed 476a1e78772SMel Gorman */ 4777f09ca51SMel Gorman if (!vma_has_reserves(vma) && 478a5516438SAndi Kleen h->free_huge_pages - h->resv_huge_pages == 0) 479a1e78772SMel Gorman return NULL; 480a1e78772SMel Gorman 48104f2cbe3SMel Gorman /* If reserves cannot be used, ensure enough pages are in the pool */ 482a5516438SAndi Kleen if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) 48304f2cbe3SMel Gorman return NULL; 48404f2cbe3SMel Gorman 48519770b32SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, 48619770b32SMel Gorman MAX_NR_ZONES - 1, nodemask) { 48754a6eb5cSMel Gorman nid = zone_to_nid(zone); 48854a6eb5cSMel Gorman if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) && 489a5516438SAndi Kleen !list_empty(&h->hugepage_freelists[nid])) { 490a5516438SAndi Kleen page = list_entry(h->hugepage_freelists[nid].next, 4911da177e4SLinus Torvalds struct page, lru); 4921da177e4SLinus Torvalds list_del(&page->lru); 493a5516438SAndi Kleen h->free_huge_pages--; 494a5516438SAndi Kleen h->free_huge_pages_node[nid]--; 49504f2cbe3SMel Gorman 49604f2cbe3SMel Gorman if (!avoid_reserve) 497a5516438SAndi Kleen decrement_hugepage_resv_vma(h, vma); 498a1e78772SMel Gorman 4995ab3ee7bSKen Chen break; 5001da177e4SLinus Torvalds } 5013abf7afdSAndrew Morton } 50252cd3b07SLee Schermerhorn mpol_cond_put(mpol); 5031da177e4SLinus Torvalds return page; 5041da177e4SLinus Torvalds } 5051da177e4SLinus Torvalds 506a5516438SAndi Kleen static void update_and_free_page(struct hstate *h, struct page *page) 5076af2acb6SAdam Litke { 5086af2acb6SAdam Litke int i; 509a5516438SAndi Kleen 51018229df5SAndy Whitcroft VM_BUG_ON(h->order >= MAX_ORDER); 51118229df5SAndy Whitcroft 512a5516438SAndi Kleen h->nr_huge_pages--; 513a5516438SAndi Kleen h->nr_huge_pages_node[page_to_nid(page)]--; 514a5516438SAndi Kleen for (i = 0; i < pages_per_huge_page(h); i++) { 5156af2acb6SAdam Litke page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | 5166af2acb6SAdam Litke 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | 5176af2acb6SAdam Litke 1 << PG_private | 1<< PG_writeback); 5186af2acb6SAdam Litke } 5196af2acb6SAdam Litke set_compound_page_dtor(page, NULL); 5206af2acb6SAdam Litke set_page_refcounted(page); 5217f2e9525SGerald Schaefer arch_release_hugepage(page); 522a5516438SAndi Kleen __free_pages(page, huge_page_order(h)); 5236af2acb6SAdam Litke } 5246af2acb6SAdam Litke 525e5ff2159SAndi Kleen struct hstate *size_to_hstate(unsigned long size) 526e5ff2159SAndi Kleen { 527e5ff2159SAndi Kleen struct hstate *h; 528e5ff2159SAndi Kleen 529e5ff2159SAndi Kleen for_each_hstate(h) { 530e5ff2159SAndi Kleen if (huge_page_size(h) == size) 531e5ff2159SAndi Kleen return h; 532e5ff2159SAndi Kleen } 533e5ff2159SAndi Kleen return NULL; 534e5ff2159SAndi Kleen } 535e5ff2159SAndi Kleen 53627a85ef1SDavid Gibson static void free_huge_page(struct page *page) 53727a85ef1SDavid Gibson { 538a5516438SAndi Kleen /* 539a5516438SAndi Kleen * Can't pass hstate in here because it is called from the 540a5516438SAndi Kleen * compound page destructor. 
541a5516438SAndi Kleen */ 542e5ff2159SAndi Kleen struct hstate *h = page_hstate(page); 5437893d1d5SAdam Litke int nid = page_to_nid(page); 544c79fb75eSAdam Litke struct address_space *mapping; 54527a85ef1SDavid Gibson 546c79fb75eSAdam Litke mapping = (struct address_space *) page_private(page); 547e5df70abSAndy Whitcroft set_page_private(page, 0); 5487893d1d5SAdam Litke BUG_ON(page_count(page)); 54927a85ef1SDavid Gibson INIT_LIST_HEAD(&page->lru); 55027a85ef1SDavid Gibson 55127a85ef1SDavid Gibson spin_lock(&hugetlb_lock); 552aa888a74SAndi Kleen if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) { 553a5516438SAndi Kleen update_and_free_page(h, page); 554a5516438SAndi Kleen h->surplus_huge_pages--; 555a5516438SAndi Kleen h->surplus_huge_pages_node[nid]--; 5567893d1d5SAdam Litke } else { 557a5516438SAndi Kleen enqueue_huge_page(h, page); 5587893d1d5SAdam Litke } 55927a85ef1SDavid Gibson spin_unlock(&hugetlb_lock); 560c79fb75eSAdam Litke if (mapping) 5619a119c05SAdam Litke hugetlb_put_quota(mapping, 1); 56227a85ef1SDavid Gibson } 56327a85ef1SDavid Gibson 564a5516438SAndi Kleen static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) 565b7ba30c6SAndi Kleen { 566b7ba30c6SAndi Kleen set_compound_page_dtor(page, free_huge_page); 567b7ba30c6SAndi Kleen spin_lock(&hugetlb_lock); 568a5516438SAndi Kleen h->nr_huge_pages++; 569a5516438SAndi Kleen h->nr_huge_pages_node[nid]++; 570b7ba30c6SAndi Kleen spin_unlock(&hugetlb_lock); 571b7ba30c6SAndi Kleen put_page(page); /* free it into the hugepage allocator */ 572b7ba30c6SAndi Kleen } 573b7ba30c6SAndi Kleen 57420a0307cSWu Fengguang static void prep_compound_gigantic_page(struct page *page, unsigned long order) 57520a0307cSWu Fengguang { 57620a0307cSWu Fengguang int i; 57720a0307cSWu Fengguang int nr_pages = 1 << order; 57820a0307cSWu Fengguang struct page *p = page + 1; 57920a0307cSWu Fengguang 58020a0307cSWu Fengguang /* we rely on prep_new_huge_page to set the destructor */ 58120a0307cSWu Fengguang set_compound_order(page, order); 58220a0307cSWu Fengguang __SetPageHead(page); 58320a0307cSWu Fengguang for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { 58420a0307cSWu Fengguang __SetPageTail(p); 58520a0307cSWu Fengguang p->first_page = page; 58620a0307cSWu Fengguang } 58720a0307cSWu Fengguang } 58820a0307cSWu Fengguang 58920a0307cSWu Fengguang int PageHuge(struct page *page) 59020a0307cSWu Fengguang { 59120a0307cSWu Fengguang compound_page_dtor *dtor; 59220a0307cSWu Fengguang 59320a0307cSWu Fengguang if (!PageCompound(page)) 59420a0307cSWu Fengguang return 0; 59520a0307cSWu Fengguang 59620a0307cSWu Fengguang page = compound_head(page); 59720a0307cSWu Fengguang dtor = get_compound_page_dtor(page); 59820a0307cSWu Fengguang 59920a0307cSWu Fengguang return dtor == free_huge_page; 60020a0307cSWu Fengguang } 60120a0307cSWu Fengguang 602a5516438SAndi Kleen static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) 6031da177e4SLinus Torvalds { 6041da177e4SLinus Torvalds struct page *page; 605f96efd58SJoe Jin 606aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 607aa888a74SAndi Kleen return NULL; 608aa888a74SAndi Kleen 6096484eb3eSMel Gorman page = alloc_pages_exact_node(nid, 610551883aeSNishanth Aravamudan htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| 611551883aeSNishanth Aravamudan __GFP_REPEAT|__GFP_NOWARN, 612a5516438SAndi Kleen huge_page_order(h)); 6131da177e4SLinus Torvalds if (page) { 6147f2e9525SGerald Schaefer if (arch_prepare_hugepage(page)) { 615caff3a2cSGerald Schaefer __free_pages(page, 
huge_page_order(h)); 6167b8ee84dSHarvey Harrison return NULL; 6177f2e9525SGerald Schaefer } 618a5516438SAndi Kleen prep_new_huge_page(h, page, nid); 6191da177e4SLinus Torvalds } 62063b4613cSNishanth Aravamudan 62163b4613cSNishanth Aravamudan return page; 62263b4613cSNishanth Aravamudan } 62363b4613cSNishanth Aravamudan 6245ced66c9SAndi Kleen /* 6256ae11b27SLee Schermerhorn * common helper functions for hstate_next_node_to_{alloc|free}. 6266ae11b27SLee Schermerhorn * We may have allocated or freed a huge page based on a different 6276ae11b27SLee Schermerhorn * nodes_allowed previously, so h->next_node_to_{alloc|free} might 6286ae11b27SLee Schermerhorn * be outside of *nodes_allowed. Ensure that we use an allowed 6296ae11b27SLee Schermerhorn * node for alloc or free. 6309a76db09SLee Schermerhorn */ 6316ae11b27SLee Schermerhorn static int next_node_allowed(int nid, nodemask_t *nodes_allowed) 6329a76db09SLee Schermerhorn { 6336ae11b27SLee Schermerhorn nid = next_node(nid, *nodes_allowed); 6349a76db09SLee Schermerhorn if (nid == MAX_NUMNODES) 6356ae11b27SLee Schermerhorn nid = first_node(*nodes_allowed); 6369a76db09SLee Schermerhorn VM_BUG_ON(nid >= MAX_NUMNODES); 6379a76db09SLee Schermerhorn 6389a76db09SLee Schermerhorn return nid; 6399a76db09SLee Schermerhorn } 6409a76db09SLee Schermerhorn 6416ae11b27SLee Schermerhorn static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) 6425ced66c9SAndi Kleen { 6436ae11b27SLee Schermerhorn if (!node_isset(nid, *nodes_allowed)) 6446ae11b27SLee Schermerhorn nid = next_node_allowed(nid, nodes_allowed); 6459a76db09SLee Schermerhorn return nid; 6465ced66c9SAndi Kleen } 6475ced66c9SAndi Kleen 6486ae11b27SLee Schermerhorn /* 6496ae11b27SLee Schermerhorn * returns the previously saved node ["this node"] from which to 6506ae11b27SLee Schermerhorn * allocate a persistent huge page for the pool and advance the 6516ae11b27SLee Schermerhorn * next node from which to allocate, handling wrap at end of node 6526ae11b27SLee Schermerhorn * mask. 
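 * For example, with nodes_allowed = {0,2,3} and next_nid_to_alloc == 2,
 * this returns 2 and advances next_nid_to_alloc to 3; the following call
 * returns 3 and wraps next_nid_to_alloc back around to node 0.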
6536ae11b27SLee Schermerhorn */ 6546ae11b27SLee Schermerhorn static int hstate_next_node_to_alloc(struct hstate *h, 6556ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 6566ae11b27SLee Schermerhorn { 6576ae11b27SLee Schermerhorn int nid; 6586ae11b27SLee Schermerhorn 6596ae11b27SLee Schermerhorn VM_BUG_ON(!nodes_allowed); 6606ae11b27SLee Schermerhorn 6616ae11b27SLee Schermerhorn nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); 6626ae11b27SLee Schermerhorn h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); 6636ae11b27SLee Schermerhorn 6646ae11b27SLee Schermerhorn return nid; 6656ae11b27SLee Schermerhorn } 6666ae11b27SLee Schermerhorn 6676ae11b27SLee Schermerhorn static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed) 66863b4613cSNishanth Aravamudan { 66963b4613cSNishanth Aravamudan struct page *page; 67063b4613cSNishanth Aravamudan int start_nid; 67163b4613cSNishanth Aravamudan int next_nid; 67263b4613cSNishanth Aravamudan int ret = 0; 67363b4613cSNishanth Aravamudan 6746ae11b27SLee Schermerhorn start_nid = hstate_next_node_to_alloc(h, nodes_allowed); 675e8c5c824SLee Schermerhorn next_nid = start_nid; 67663b4613cSNishanth Aravamudan 67763b4613cSNishanth Aravamudan do { 678e8c5c824SLee Schermerhorn page = alloc_fresh_huge_page_node(h, next_nid); 6799a76db09SLee Schermerhorn if (page) { 68063b4613cSNishanth Aravamudan ret = 1; 6819a76db09SLee Schermerhorn break; 6829a76db09SLee Schermerhorn } 6836ae11b27SLee Schermerhorn next_nid = hstate_next_node_to_alloc(h, nodes_allowed); 6849a76db09SLee Schermerhorn } while (next_nid != start_nid); 68563b4613cSNishanth Aravamudan 6863b116300SAdam Litke if (ret) 6873b116300SAdam Litke count_vm_event(HTLB_BUDDY_PGALLOC); 6883b116300SAdam Litke else 6893b116300SAdam Litke count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 6903b116300SAdam Litke 69163b4613cSNishanth Aravamudan return ret; 6921da177e4SLinus Torvalds } 6931da177e4SLinus Torvalds 694e8c5c824SLee Schermerhorn /* 6956ae11b27SLee Schermerhorn * helper for free_pool_huge_page() - return the previously saved 6966ae11b27SLee Schermerhorn * node ["this node"] from which to free a huge page. Advance the 6976ae11b27SLee Schermerhorn * next node id whether or not we find a free huge page to free so 6986ae11b27SLee Schermerhorn * that the next attempt to free addresses the next node. 699e8c5c824SLee Schermerhorn */ 7006ae11b27SLee Schermerhorn static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) 701e8c5c824SLee Schermerhorn { 7026ae11b27SLee Schermerhorn int nid; 7039a76db09SLee Schermerhorn 7046ae11b27SLee Schermerhorn VM_BUG_ON(!nodes_allowed); 7056ae11b27SLee Schermerhorn 7066ae11b27SLee Schermerhorn nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); 7076ae11b27SLee Schermerhorn h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); 7086ae11b27SLee Schermerhorn 7099a76db09SLee Schermerhorn return nid; 710e8c5c824SLee Schermerhorn } 711e8c5c824SLee Schermerhorn 712e8c5c824SLee Schermerhorn /* 713e8c5c824SLee Schermerhorn * Free huge page from pool from next node to free. 714e8c5c824SLee Schermerhorn * Attempt to keep persistent huge pages more or less 715e8c5c824SLee Schermerhorn * balanced over allowed nodes. 716e8c5c824SLee Schermerhorn * Called with hugetlb_lock locked. 
717e8c5c824SLee Schermerhorn */ 7186ae11b27SLee Schermerhorn static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, 7196ae11b27SLee Schermerhorn bool acct_surplus) 720e8c5c824SLee Schermerhorn { 721e8c5c824SLee Schermerhorn int start_nid; 722e8c5c824SLee Schermerhorn int next_nid; 723e8c5c824SLee Schermerhorn int ret = 0; 724e8c5c824SLee Schermerhorn 7256ae11b27SLee Schermerhorn start_nid = hstate_next_node_to_free(h, nodes_allowed); 726e8c5c824SLee Schermerhorn next_nid = start_nid; 727e8c5c824SLee Schermerhorn 728e8c5c824SLee Schermerhorn do { 729685f3457SLee Schermerhorn /* 730685f3457SLee Schermerhorn * If we're returning unused surplus pages, only examine 731685f3457SLee Schermerhorn * nodes with surplus pages. 732685f3457SLee Schermerhorn */ 733685f3457SLee Schermerhorn if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) && 734685f3457SLee Schermerhorn !list_empty(&h->hugepage_freelists[next_nid])) { 735e8c5c824SLee Schermerhorn struct page *page = 736e8c5c824SLee Schermerhorn list_entry(h->hugepage_freelists[next_nid].next, 737e8c5c824SLee Schermerhorn struct page, lru); 738e8c5c824SLee Schermerhorn list_del(&page->lru); 739e8c5c824SLee Schermerhorn h->free_huge_pages--; 740e8c5c824SLee Schermerhorn h->free_huge_pages_node[next_nid]--; 741685f3457SLee Schermerhorn if (acct_surplus) { 742685f3457SLee Schermerhorn h->surplus_huge_pages--; 743685f3457SLee Schermerhorn h->surplus_huge_pages_node[next_nid]--; 744685f3457SLee Schermerhorn } 745e8c5c824SLee Schermerhorn update_and_free_page(h, page); 746e8c5c824SLee Schermerhorn ret = 1; 7479a76db09SLee Schermerhorn break; 748e8c5c824SLee Schermerhorn } 7496ae11b27SLee Schermerhorn next_nid = hstate_next_node_to_free(h, nodes_allowed); 7509a76db09SLee Schermerhorn } while (next_nid != start_nid); 751e8c5c824SLee Schermerhorn 752e8c5c824SLee Schermerhorn return ret; 753e8c5c824SLee Schermerhorn } 754e8c5c824SLee Schermerhorn 755a5516438SAndi Kleen static struct page *alloc_buddy_huge_page(struct hstate *h, 756a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 7577893d1d5SAdam Litke { 7587893d1d5SAdam Litke struct page *page; 759d1c3fb1fSNishanth Aravamudan unsigned int nid; 7607893d1d5SAdam Litke 761aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 762aa888a74SAndi Kleen return NULL; 763aa888a74SAndi Kleen 764d1c3fb1fSNishanth Aravamudan /* 765d1c3fb1fSNishanth Aravamudan * Assume we will successfully allocate the surplus page to 766d1c3fb1fSNishanth Aravamudan * prevent racing processes from causing the surplus to exceed 767d1c3fb1fSNishanth Aravamudan * overcommit 768d1c3fb1fSNishanth Aravamudan * 769d1c3fb1fSNishanth Aravamudan * This however introduces a different race, where a process B 770d1c3fb1fSNishanth Aravamudan * tries to grow the static hugepage pool while alloc_pages() is 771d1c3fb1fSNishanth Aravamudan * called by process A. B will only examine the per-node 772d1c3fb1fSNishanth Aravamudan * counters in determining if surplus huge pages can be 773d1c3fb1fSNishanth Aravamudan * converted to normal huge pages in adjust_pool_surplus(). A 774d1c3fb1fSNishanth Aravamudan * won't be able to increment the per-node counter, until the 775d1c3fb1fSNishanth Aravamudan * lock is dropped by B, but B doesn't drop hugetlb_lock until 776d1c3fb1fSNishanth Aravamudan * no more huge pages can be converted from surplus to normal 777d1c3fb1fSNishanth Aravamudan * state (and doesn't try to convert again). 
Thus, we have a 778d1c3fb1fSNishanth Aravamudan * case where a surplus huge page exists, the pool is grown, and 779d1c3fb1fSNishanth Aravamudan * the surplus huge page still exists after, even though it 780d1c3fb1fSNishanth Aravamudan * should just have been converted to a normal huge page. This 781d1c3fb1fSNishanth Aravamudan * does not leak memory, though, as the hugepage will be freed 782d1c3fb1fSNishanth Aravamudan * once it is out of use. It also does not allow the counters to 783d1c3fb1fSNishanth Aravamudan * go out of whack in adjust_pool_surplus() as we don't modify 784d1c3fb1fSNishanth Aravamudan * the node values until we've gotten the hugepage and only the 785d1c3fb1fSNishanth Aravamudan * per-node value is checked there. 786d1c3fb1fSNishanth Aravamudan */ 787d1c3fb1fSNishanth Aravamudan spin_lock(&hugetlb_lock); 788a5516438SAndi Kleen if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 789d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 790d1c3fb1fSNishanth Aravamudan return NULL; 791d1c3fb1fSNishanth Aravamudan } else { 792a5516438SAndi Kleen h->nr_huge_pages++; 793a5516438SAndi Kleen h->surplus_huge_pages++; 794d1c3fb1fSNishanth Aravamudan } 795d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 796d1c3fb1fSNishanth Aravamudan 797551883aeSNishanth Aravamudan page = alloc_pages(htlb_alloc_mask|__GFP_COMP| 798551883aeSNishanth Aravamudan __GFP_REPEAT|__GFP_NOWARN, 799a5516438SAndi Kleen huge_page_order(h)); 800d1c3fb1fSNishanth Aravamudan 801caff3a2cSGerald Schaefer if (page && arch_prepare_hugepage(page)) { 802caff3a2cSGerald Schaefer __free_pages(page, huge_page_order(h)); 803caff3a2cSGerald Schaefer return NULL; 804caff3a2cSGerald Schaefer } 805caff3a2cSGerald Schaefer 8067893d1d5SAdam Litke spin_lock(&hugetlb_lock); 807d1c3fb1fSNishanth Aravamudan if (page) { 8082668db91SAdam Litke /* 8092668db91SAdam Litke * This page is now managed by the hugetlb allocator and has 8102668db91SAdam Litke * no users -- drop the buddy allocator's reference. 8112668db91SAdam Litke */ 8122668db91SAdam Litke put_page_testzero(page); 8132668db91SAdam Litke VM_BUG_ON(page_count(page)); 814d1c3fb1fSNishanth Aravamudan nid = page_to_nid(page); 815d1c3fb1fSNishanth Aravamudan set_compound_page_dtor(page, free_huge_page); 816d1c3fb1fSNishanth Aravamudan /* 817d1c3fb1fSNishanth Aravamudan * We incremented the global counters already 818d1c3fb1fSNishanth Aravamudan */ 819a5516438SAndi Kleen h->nr_huge_pages_node[nid]++; 820a5516438SAndi Kleen h->surplus_huge_pages_node[nid]++; 8213b116300SAdam Litke __count_vm_event(HTLB_BUDDY_PGALLOC); 822d1c3fb1fSNishanth Aravamudan } else { 823a5516438SAndi Kleen h->nr_huge_pages--; 824a5516438SAndi Kleen h->surplus_huge_pages--; 8253b116300SAdam Litke __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 8267893d1d5SAdam Litke } 827d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 8287893d1d5SAdam Litke 8297893d1d5SAdam Litke return page; 8307893d1d5SAdam Litke } 8317893d1d5SAdam Litke 832e4e574b7SAdam Litke /* 833e4e574b7SAdam Litke * Increase the hugetlb pool such that it can accommodate a reservation 834e4e574b7SAdam Litke * of size 'delta'.
*/ 835e4e574b7SAdam Litke 836a5516438SAndi Kleen static int gather_surplus_pages(struct hstate *h, int delta) 837e4e574b7SAdam Litke { 838e4e574b7SAdam Litke struct list_head surplus_list; 839e4e574b7SAdam Litke struct page *page, *tmp; 840e4e574b7SAdam Litke int ret, i; 841e4e574b7SAdam Litke int needed, allocated; 842e4e574b7SAdam Litke 843a5516438SAndi Kleen needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 844ac09b3a1SAdam Litke if (needed <= 0) { 845a5516438SAndi Kleen h->resv_huge_pages += delta; 846e4e574b7SAdam Litke return 0; 847ac09b3a1SAdam Litke } 848e4e574b7SAdam Litke 849e4e574b7SAdam Litke allocated = 0; 850e4e574b7SAdam Litke INIT_LIST_HEAD(&surplus_list); 851e4e574b7SAdam Litke 852e4e574b7SAdam Litke ret = -ENOMEM; 853e4e574b7SAdam Litke retry: 854e4e574b7SAdam Litke spin_unlock(&hugetlb_lock); 855e4e574b7SAdam Litke for (i = 0; i < needed; i++) { 856a5516438SAndi Kleen page = alloc_buddy_huge_page(h, NULL, 0); 857e4e574b7SAdam Litke if (!page) { 858e4e574b7SAdam Litke /* 859e4e574b7SAdam Litke * We were not able to allocate enough pages to 860e4e574b7SAdam Litke * satisfy the entire reservation so we free what 861e4e574b7SAdam Litke * we've allocated so far. 862e4e574b7SAdam Litke */ 863e4e574b7SAdam Litke spin_lock(&hugetlb_lock); 864e4e574b7SAdam Litke needed = 0; 865e4e574b7SAdam Litke goto free; 866e4e574b7SAdam Litke } 867e4e574b7SAdam Litke 868e4e574b7SAdam Litke list_add(&page->lru, &surplus_list); 869e4e574b7SAdam Litke } 870e4e574b7SAdam Litke allocated += needed; 871e4e574b7SAdam Litke 872e4e574b7SAdam Litke /* 873e4e574b7SAdam Litke * After retaking hugetlb_lock, we need to recalculate 'needed' 874e4e574b7SAdam Litke * because either resv_huge_pages or free_huge_pages may have changed. 875e4e574b7SAdam Litke */ 876e4e574b7SAdam Litke spin_lock(&hugetlb_lock); 877a5516438SAndi Kleen needed = (h->resv_huge_pages + delta) - 878a5516438SAndi Kleen (h->free_huge_pages + allocated); 879e4e574b7SAdam Litke if (needed > 0) 880e4e574b7SAdam Litke goto retry; 881e4e574b7SAdam Litke 882e4e574b7SAdam Litke /* 883e4e574b7SAdam Litke * The surplus_list now contains _at_least_ the number of extra pages 884e4e574b7SAdam Litke * needed to accommodate the reservation. Add the appropriate number 885e4e574b7SAdam Litke * of pages to the hugetlb pool and free the extras back to the buddy 886ac09b3a1SAdam Litke * allocator. Commit the entire reservation here to prevent another 887ac09b3a1SAdam Litke * process from stealing the pages as they are added to the pool but 888ac09b3a1SAdam Litke * before they are reserved.
889e4e574b7SAdam Litke */ 890e4e574b7SAdam Litke needed += allocated; 891a5516438SAndi Kleen h->resv_huge_pages += delta; 892e4e574b7SAdam Litke ret = 0; 893e4e574b7SAdam Litke free: 89419fc3f0aSAdam Litke /* Free the needed pages to the hugetlb pool */ 89519fc3f0aSAdam Litke list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 89619fc3f0aSAdam Litke if ((--needed) < 0) 89719fc3f0aSAdam Litke break; 89819fc3f0aSAdam Litke list_del(&page->lru); 899a5516438SAndi Kleen enqueue_huge_page(h, page); 90019fc3f0aSAdam Litke } 90119fc3f0aSAdam Litke 90219fc3f0aSAdam Litke /* Free unnecessary surplus pages to the buddy allocator */ 90319fc3f0aSAdam Litke if (!list_empty(&surplus_list)) { 90419fc3f0aSAdam Litke spin_unlock(&hugetlb_lock); 905e4e574b7SAdam Litke list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 906e4e574b7SAdam Litke list_del(&page->lru); 907af767cbdSAdam Litke /* 9082668db91SAdam Litke * The page has a reference count of zero already, so 9092668db91SAdam Litke * call free_huge_page directly instead of using 9102668db91SAdam Litke * put_page. This must be done with hugetlb_lock 911af767cbdSAdam Litke * unlocked which is safe because free_huge_page takes 912af767cbdSAdam Litke * hugetlb_lock before deciding how to free the page. 913af767cbdSAdam Litke */ 9142668db91SAdam Litke free_huge_page(page); 915af767cbdSAdam Litke } 91619fc3f0aSAdam Litke spin_lock(&hugetlb_lock); 917e4e574b7SAdam Litke } 918e4e574b7SAdam Litke 919e4e574b7SAdam Litke return ret; 920e4e574b7SAdam Litke } 921e4e574b7SAdam Litke 922e4e574b7SAdam Litke /* 923e4e574b7SAdam Litke * When releasing a hugetlb pool reservation, any surplus pages that were 924e4e574b7SAdam Litke * allocated to satisfy the reservation must be explicitly freed if they were 925e4e574b7SAdam Litke * never used. 926685f3457SLee Schermerhorn * Called with hugetlb_lock held. 927e4e574b7SAdam Litke */ 928a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h, 929a5516438SAndi Kleen unsigned long unused_resv_pages) 930e4e574b7SAdam Litke { 931e4e574b7SAdam Litke unsigned long nr_pages; 932e4e574b7SAdam Litke 933ac09b3a1SAdam Litke /* Uncommit the reservation */ 934a5516438SAndi Kleen h->resv_huge_pages -= unused_resv_pages; 935ac09b3a1SAdam Litke 936aa888a74SAndi Kleen /* Cannot return gigantic pages currently */ 937aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 938aa888a74SAndi Kleen return; 939aa888a74SAndi Kleen 940a5516438SAndi Kleen nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 941e4e574b7SAdam Litke 942685f3457SLee Schermerhorn /* 943685f3457SLee Schermerhorn * We want to release as many surplus pages as possible, spread 944685f3457SLee Schermerhorn * evenly across all nodes. Iterate across all nodes until we 945685f3457SLee Schermerhorn * can no longer free unreserved surplus pages. This occurs when 946685f3457SLee Schermerhorn * the nodes with surplus pages have no free pages. 947685f3457SLee Schermerhorn * free_pool_huge_page() will balance the the frees across the 948685f3457SLee Schermerhorn * on-line nodes for us and will handle the hstate accounting. 949685f3457SLee Schermerhorn */ 950685f3457SLee Schermerhorn while (nr_pages--) { 9516ae11b27SLee Schermerhorn if (!free_pool_huge_page(h, &node_online_map, 1)) 952685f3457SLee Schermerhorn break; 953e4e574b7SAdam Litke } 954e4e574b7SAdam Litke } 955e4e574b7SAdam Litke 956c37f9fb1SAndy Whitcroft /* 957c37f9fb1SAndy Whitcroft * Determine if the huge page at addr within the vma has an associated 958c37f9fb1SAndy Whitcroft * reservation. 
Where it does not we will need to logically increase 959c37f9fb1SAndy Whitcroft * the reservation and actually increase quota before an allocation can occur. 960c37f9fb1SAndy Whitcroft * Where any new reservation would be required the reservation change is 961c37f9fb1SAndy Whitcroft * prepared, but not committed. Once the page has been quota'd, allocated 962c37f9fb1SAndy Whitcroft * and instantiated, the change should be committed via vma_commit_reservation. 963c37f9fb1SAndy Whitcroft * No action is required on failure. 964c37f9fb1SAndy Whitcroft */ 965e2f17d94SRoel Kluin static long vma_needs_reservation(struct hstate *h, 966a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long addr) 967c37f9fb1SAndy Whitcroft { 968c37f9fb1SAndy Whitcroft struct address_space *mapping = vma->vm_file->f_mapping; 969c37f9fb1SAndy Whitcroft struct inode *inode = mapping->host; 970c37f9fb1SAndy Whitcroft 971f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) { 972a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 973c37f9fb1SAndy Whitcroft return region_chg(&inode->i_mapping->private_list, 974c37f9fb1SAndy Whitcroft idx, idx + 1); 975c37f9fb1SAndy Whitcroft 97684afd99bSAndy Whitcroft } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 977c37f9fb1SAndy Whitcroft return 1; 978c37f9fb1SAndy Whitcroft 97984afd99bSAndy Whitcroft } else { 980e2f17d94SRoel Kluin long err; 981a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 98284afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 98384afd99bSAndy Whitcroft 98484afd99bSAndy Whitcroft err = region_chg(&reservations->regions, idx, idx + 1); 98584afd99bSAndy Whitcroft if (err < 0) 98684afd99bSAndy Whitcroft return err; 987c37f9fb1SAndy Whitcroft return 0; 988c37f9fb1SAndy Whitcroft } 98984afd99bSAndy Whitcroft } 990a5516438SAndi Kleen static void vma_commit_reservation(struct hstate *h, 991a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long addr) 992c37f9fb1SAndy Whitcroft { 993c37f9fb1SAndy Whitcroft struct address_space *mapping = vma->vm_file->f_mapping; 994c37f9fb1SAndy Whitcroft struct inode *inode = mapping->host; 995c37f9fb1SAndy Whitcroft 996f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) { 997a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 998c37f9fb1SAndy Whitcroft region_add(&inode->i_mapping->private_list, idx, idx + 1); 99984afd99bSAndy Whitcroft 100084afd99bSAndy Whitcroft } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 1001a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 100284afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 100384afd99bSAndy Whitcroft 100484afd99bSAndy Whitcroft /* Mark this page used in the map.
*/ 100584afd99bSAndy Whitcroft region_add(&reservations->regions, idx, idx + 1); 1006c37f9fb1SAndy Whitcroft } 1007c37f9fb1SAndy Whitcroft } 1008c37f9fb1SAndy Whitcroft 1009348ea204SAdam Litke static struct page *alloc_huge_page(struct vm_area_struct *vma, 101004f2cbe3SMel Gorman unsigned long addr, int avoid_reserve) 1011348ea204SAdam Litke { 1012a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1013348ea204SAdam Litke struct page *page; 10142fc39cecSAdam Litke struct address_space *mapping = vma->vm_file->f_mapping; 1015a1e78772SMel Gorman struct inode *inode = mapping->host; 1016e2f17d94SRoel Kluin long chg; 10172fc39cecSAdam Litke 1018a1e78772SMel Gorman /* 1019a1e78772SMel Gorman * Processes that did not create the mapping will have no reserves and 1020a1e78772SMel Gorman * will not have accounted against quota. Check that the quota can be 1021a1e78772SMel Gorman * made before satisfying the allocation 1022c37f9fb1SAndy Whitcroft * MAP_NORESERVE mappings may also need pages and quota allocated 1023c37f9fb1SAndy Whitcroft * if no reserve mapping overlaps. 1024a1e78772SMel Gorman */ 1025a5516438SAndi Kleen chg = vma_needs_reservation(h, vma, addr); 1026c37f9fb1SAndy Whitcroft if (chg < 0) 1027c37f9fb1SAndy Whitcroft return ERR_PTR(chg); 1028c37f9fb1SAndy Whitcroft if (chg) 1029a1e78772SMel Gorman if (hugetlb_get_quota(inode->i_mapping, chg)) 1030a1e78772SMel Gorman return ERR_PTR(-ENOSPC); 103190d8b7e6SAdam Litke 1032a1e78772SMel Gorman spin_lock(&hugetlb_lock); 1033a5516438SAndi Kleen page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve); 1034a1e78772SMel Gorman spin_unlock(&hugetlb_lock); 1035a1e78772SMel Gorman 1036a1e78772SMel Gorman if (!page) { 1037a5516438SAndi Kleen page = alloc_buddy_huge_page(h, vma, addr); 1038a1e78772SMel Gorman if (!page) { 1039a1e78772SMel Gorman hugetlb_put_quota(inode->i_mapping, chg); 1040a1e78772SMel Gorman return ERR_PTR(-VM_FAULT_OOM); 1041a1e78772SMel Gorman } 1042a1e78772SMel Gorman } 1043a1e78772SMel Gorman 1044348ea204SAdam Litke set_page_refcounted(page); 10452fc39cecSAdam Litke set_page_private(page, (unsigned long) mapping); 1046a1e78772SMel Gorman 1047a5516438SAndi Kleen vma_commit_reservation(h, vma, addr); 1048c37f9fb1SAndy Whitcroft 10497893d1d5SAdam Litke return page; 1050b45b5bd6SDavid Gibson } 1051b45b5bd6SDavid Gibson 105291f47662SCyrill Gorcunov int __weak alloc_bootmem_huge_page(struct hstate *h) 1053aa888a74SAndi Kleen { 1054aa888a74SAndi Kleen struct huge_bootmem_page *m; 1055aa888a74SAndi Kleen int nr_nodes = nodes_weight(node_online_map); 1056aa888a74SAndi Kleen 1057aa888a74SAndi Kleen while (nr_nodes) { 1058aa888a74SAndi Kleen void *addr; 1059aa888a74SAndi Kleen 1060aa888a74SAndi Kleen addr = __alloc_bootmem_node_nopanic( 10616ae11b27SLee Schermerhorn NODE_DATA(hstate_next_node_to_alloc(h, 10626ae11b27SLee Schermerhorn &node_online_map)), 1063aa888a74SAndi Kleen huge_page_size(h), huge_page_size(h), 0); 1064aa888a74SAndi Kleen 1065aa888a74SAndi Kleen if (addr) { 1066aa888a74SAndi Kleen /* 1067aa888a74SAndi Kleen * Use the beginning of the huge page to store the 1068aa888a74SAndi Kleen * huge_bootmem_page struct (until gather_bootmem 1069aa888a74SAndi Kleen * puts them into the mem_map). 
1070aa888a74SAndi Kleen */ 1071aa888a74SAndi Kleen m = addr; 1072aa888a74SAndi Kleen goto found; 1073aa888a74SAndi Kleen } 1074aa888a74SAndi Kleen nr_nodes--; 1075aa888a74SAndi Kleen } 1076aa888a74SAndi Kleen return 0; 1077aa888a74SAndi Kleen 1078aa888a74SAndi Kleen found: 1079aa888a74SAndi Kleen BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1)); 1080aa888a74SAndi Kleen /* Put them into a private list first because mem_map is not up yet */ 1081aa888a74SAndi Kleen list_add(&m->list, &huge_boot_pages); 1082aa888a74SAndi Kleen m->hstate = h; 1083aa888a74SAndi Kleen return 1; 1084aa888a74SAndi Kleen } 1085aa888a74SAndi Kleen 108618229df5SAndy Whitcroft static void prep_compound_huge_page(struct page *page, int order) 108718229df5SAndy Whitcroft { 108818229df5SAndy Whitcroft if (unlikely(order > (MAX_ORDER - 1))) 108918229df5SAndy Whitcroft prep_compound_gigantic_page(page, order); 109018229df5SAndy Whitcroft else 109118229df5SAndy Whitcroft prep_compound_page(page, order); 109218229df5SAndy Whitcroft } 109318229df5SAndy Whitcroft 1094aa888a74SAndi Kleen /* Put bootmem huge pages into the standard lists after mem_map is up */ 1095aa888a74SAndi Kleen static void __init gather_bootmem_prealloc(void) 1096aa888a74SAndi Kleen { 1097aa888a74SAndi Kleen struct huge_bootmem_page *m; 1098aa888a74SAndi Kleen 1099aa888a74SAndi Kleen list_for_each_entry(m, &huge_boot_pages, list) { 1100aa888a74SAndi Kleen struct page *page = virt_to_page(m); 1101aa888a74SAndi Kleen struct hstate *h = m->hstate; 1102aa888a74SAndi Kleen __ClearPageReserved(page); 1103aa888a74SAndi Kleen WARN_ON(page_count(page) != 1); 110418229df5SAndy Whitcroft prep_compound_huge_page(page, h->order); 1105aa888a74SAndi Kleen prep_new_huge_page(h, page, page_to_nid(page)); 1106aa888a74SAndi Kleen } 1107aa888a74SAndi Kleen } 1108aa888a74SAndi Kleen 11098faa8b07SAndi Kleen static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 11101da177e4SLinus Torvalds { 11111da177e4SLinus Torvalds unsigned long i; 11121da177e4SLinus Torvalds 1113e5ff2159SAndi Kleen for (i = 0; i < h->max_huge_pages; ++i) { 1114aa888a74SAndi Kleen if (h->order >= MAX_ORDER) { 1115aa888a74SAndi Kleen if (!alloc_bootmem_huge_page(h)) 1116aa888a74SAndi Kleen break; 11176ae11b27SLee Schermerhorn } else if (!alloc_fresh_huge_page(h, &node_online_map)) 11181da177e4SLinus Torvalds break; 11191da177e4SLinus Torvalds } 11208faa8b07SAndi Kleen h->max_huge_pages = i; 1121e5ff2159SAndi Kleen } 1122e5ff2159SAndi Kleen 1123e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void) 1124e5ff2159SAndi Kleen { 1125e5ff2159SAndi Kleen struct hstate *h; 1126e5ff2159SAndi Kleen 1127e5ff2159SAndi Kleen for_each_hstate(h) { 11288faa8b07SAndi Kleen /* oversize hugepages were init'ed in early boot */ 11298faa8b07SAndi Kleen if (h->order < MAX_ORDER) 11308faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(h); 1131e5ff2159SAndi Kleen } 1132e5ff2159SAndi Kleen } 1133e5ff2159SAndi Kleen 11344abd32dbSAndi Kleen static char * __init memfmt(char *buf, unsigned long n) 11354abd32dbSAndi Kleen { 11364abd32dbSAndi Kleen if (n >= (1UL << 30)) 11374abd32dbSAndi Kleen sprintf(buf, "%lu GB", n >> 30); 11384abd32dbSAndi Kleen else if (n >= (1UL << 20)) 11394abd32dbSAndi Kleen sprintf(buf, "%lu MB", n >> 20); 11404abd32dbSAndi Kleen else 11414abd32dbSAndi Kleen sprintf(buf, "%lu KB", n >> 10); 11424abd32dbSAndi Kleen return buf; 11434abd32dbSAndi Kleen } 11444abd32dbSAndi Kleen 1145e5ff2159SAndi Kleen static void __init report_hugepages(void) 1146e5ff2159SAndi Kleen { 1147e5ff2159SAndi 
Kleen struct hstate *h; 1148e5ff2159SAndi Kleen 1149e5ff2159SAndi Kleen for_each_hstate(h) { 11504abd32dbSAndi Kleen char buf[32]; 11514abd32dbSAndi Kleen printk(KERN_INFO "HugeTLB registered %s page size, " 11524abd32dbSAndi Kleen "pre-allocated %ld pages\n", 11534abd32dbSAndi Kleen memfmt(buf, huge_page_size(h)), 11544abd32dbSAndi Kleen h->free_huge_pages); 1155e5ff2159SAndi Kleen } 1156e5ff2159SAndi Kleen } 1157e5ff2159SAndi Kleen 11581da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM 11596ae11b27SLee Schermerhorn static void try_to_free_low(struct hstate *h, unsigned long count, 11606ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 11611da177e4SLinus Torvalds { 11624415cc8dSChristoph Lameter int i; 11634415cc8dSChristoph Lameter 1164aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1165aa888a74SAndi Kleen return; 1166aa888a74SAndi Kleen 11676ae11b27SLee Schermerhorn for_each_node_mask(i, *nodes_allowed) { 11681da177e4SLinus Torvalds struct page *page, *next; 1169a5516438SAndi Kleen struct list_head *freel = &h->hugepage_freelists[i]; 1170a5516438SAndi Kleen list_for_each_entry_safe(page, next, freel, lru) { 1171a5516438SAndi Kleen if (count >= h->nr_huge_pages) 11726b0c880dSAdam Litke return; 11731da177e4SLinus Torvalds if (PageHighMem(page)) 11741da177e4SLinus Torvalds continue; 11751da177e4SLinus Torvalds list_del(&page->lru); 1176e5ff2159SAndi Kleen update_and_free_page(h, page); 1177a5516438SAndi Kleen h->free_huge_pages--; 1178a5516438SAndi Kleen h->free_huge_pages_node[page_to_nid(page)]--; 11791da177e4SLinus Torvalds } 11801da177e4SLinus Torvalds } 11811da177e4SLinus Torvalds } 11821da177e4SLinus Torvalds #else 11836ae11b27SLee Schermerhorn static inline void try_to_free_low(struct hstate *h, unsigned long count, 11846ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 11851da177e4SLinus Torvalds { 11861da177e4SLinus Torvalds } 11871da177e4SLinus Torvalds #endif 11881da177e4SLinus Torvalds 118920a0307cSWu Fengguang /* 119020a0307cSWu Fengguang * Increment or decrement surplus_huge_pages. Keep node-specific counters 119120a0307cSWu Fengguang * balanced by operating on them in a round-robin fashion. 119220a0307cSWu Fengguang * Returns 1 if an adjustment was made. 
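 *
 * For example (illustrative): shrinking the pool (delta == -1) with
 * per-node surplus counts of node0 == 0 and node1 == 2 skips node0, since
 * it has no surplus page to account against, and applies the decrement to
 * node1; the round-robin pointer then advances so a later adjustment
 * starts from the next node.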
119320a0307cSWu Fengguang */ 11946ae11b27SLee Schermerhorn static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 11956ae11b27SLee Schermerhorn int delta) 119620a0307cSWu Fengguang { 1197e8c5c824SLee Schermerhorn int start_nid, next_nid; 119820a0307cSWu Fengguang int ret = 0; 119920a0307cSWu Fengguang 120020a0307cSWu Fengguang VM_BUG_ON(delta != -1 && delta != 1); 120120a0307cSWu Fengguang 1202e8c5c824SLee Schermerhorn if (delta < 0) 12036ae11b27SLee Schermerhorn start_nid = hstate_next_node_to_alloc(h, nodes_allowed); 1204e8c5c824SLee Schermerhorn else 12056ae11b27SLee Schermerhorn start_nid = hstate_next_node_to_free(h, nodes_allowed); 1206e8c5c824SLee Schermerhorn next_nid = start_nid; 1207e8c5c824SLee Schermerhorn 1208e8c5c824SLee Schermerhorn do { 1209e8c5c824SLee Schermerhorn int nid = next_nid; 1210e8c5c824SLee Schermerhorn if (delta < 0) { 1211e8c5c824SLee Schermerhorn /* 1212e8c5c824SLee Schermerhorn * To shrink on this node, there must be a surplus page 1213e8c5c824SLee Schermerhorn */ 12149a76db09SLee Schermerhorn if (!h->surplus_huge_pages_node[nid]) { 12156ae11b27SLee Schermerhorn next_nid = hstate_next_node_to_alloc(h, 12166ae11b27SLee Schermerhorn nodes_allowed); 121720a0307cSWu Fengguang continue; 1218e8c5c824SLee Schermerhorn } 12199a76db09SLee Schermerhorn } 1220e8c5c824SLee Schermerhorn if (delta > 0) { 1221e8c5c824SLee Schermerhorn /* 1222e8c5c824SLee Schermerhorn * Surplus cannot exceed the total number of pages 1223e8c5c824SLee Schermerhorn */ 1224e8c5c824SLee Schermerhorn if (h->surplus_huge_pages_node[nid] >= 12259a76db09SLee Schermerhorn h->nr_huge_pages_node[nid]) { 12266ae11b27SLee Schermerhorn next_nid = hstate_next_node_to_free(h, 12276ae11b27SLee Schermerhorn nodes_allowed); 122820a0307cSWu Fengguang continue; 1229e8c5c824SLee Schermerhorn } 12309a76db09SLee Schermerhorn } 123120a0307cSWu Fengguang 123220a0307cSWu Fengguang h->surplus_huge_pages += delta; 123320a0307cSWu Fengguang h->surplus_huge_pages_node[nid] += delta; 123420a0307cSWu Fengguang ret = 1; 123520a0307cSWu Fengguang break; 1236e8c5c824SLee Schermerhorn } while (next_nid != start_nid); 123720a0307cSWu Fengguang 123820a0307cSWu Fengguang return ret; 123920a0307cSWu Fengguang } 124020a0307cSWu Fengguang 1241a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 12426ae11b27SLee Schermerhorn static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, 12436ae11b27SLee Schermerhorn nodemask_t *nodes_allowed) 12441da177e4SLinus Torvalds { 12457893d1d5SAdam Litke unsigned long min_count, ret; 12461da177e4SLinus Torvalds 1247aa888a74SAndi Kleen if (h->order >= MAX_ORDER) 1248aa888a74SAndi Kleen return h->max_huge_pages; 1249aa888a74SAndi Kleen 12507893d1d5SAdam Litke /* 12517893d1d5SAdam Litke * Increase the pool size 12527893d1d5SAdam Litke * First take pages out of surplus state. Then make up the 12537893d1d5SAdam Litke * remaining difference by allocating fresh huge pages. 1254d1c3fb1fSNishanth Aravamudan * 1255d1c3fb1fSNishanth Aravamudan * We might race with alloc_buddy_huge_page() here and be unable 1256d1c3fb1fSNishanth Aravamudan * to convert a surplus huge page to a normal huge page. That is 1257d1c3fb1fSNishanth Aravamudan * not critical, though, it just means the overall size of the 1258d1c3fb1fSNishanth Aravamudan * pool might be one hugepage larger than it needs to be, but 1259d1c3fb1fSNishanth Aravamudan * within all the constraints specified by the sysctls. 
12607893d1d5SAdam Litke */ 12611da177e4SLinus Torvalds spin_lock(&hugetlb_lock); 1262a5516438SAndi Kleen while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 12636ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, -1)) 12647893d1d5SAdam Litke break; 12657893d1d5SAdam Litke } 12667893d1d5SAdam Litke 1267a5516438SAndi Kleen while (count > persistent_huge_pages(h)) { 12687893d1d5SAdam Litke /* 12697893d1d5SAdam Litke * If this allocation races such that we no longer need the 12707893d1d5SAdam Litke * page, free_huge_page will handle it by freeing the page 12717893d1d5SAdam Litke * and reducing the surplus. 12727893d1d5SAdam Litke */ 12737893d1d5SAdam Litke spin_unlock(&hugetlb_lock); 12746ae11b27SLee Schermerhorn ret = alloc_fresh_huge_page(h, nodes_allowed); 12757893d1d5SAdam Litke spin_lock(&hugetlb_lock); 12767893d1d5SAdam Litke if (!ret) 12777893d1d5SAdam Litke goto out; 12787893d1d5SAdam Litke 12797893d1d5SAdam Litke } 12807893d1d5SAdam Litke 12817893d1d5SAdam Litke /* 12827893d1d5SAdam Litke * Decrease the pool size 12837893d1d5SAdam Litke * First return free pages to the buddy allocator (being careful 12847893d1d5SAdam Litke * to keep enough around to satisfy reservations). Then place 12857893d1d5SAdam Litke * pages into surplus state as needed so the pool will shrink 12867893d1d5SAdam Litke * to the desired size as pages become free. 1287d1c3fb1fSNishanth Aravamudan * 1288d1c3fb1fSNishanth Aravamudan * By placing pages into the surplus state independent of the 1289d1c3fb1fSNishanth Aravamudan * overcommit value, we are allowing the surplus pool size to 1290d1c3fb1fSNishanth Aravamudan * exceed overcommit. There are few sane options here. Since 1291d1c3fb1fSNishanth Aravamudan * alloc_buddy_huge_page() is checking the global counter, 1292d1c3fb1fSNishanth Aravamudan * though, we'll note that we're not allowed to exceed surplus 1293d1c3fb1fSNishanth Aravamudan * and won't grow the pool anywhere else. Not until one of the 1294d1c3fb1fSNishanth Aravamudan * sysctls are changed, or the surplus pages go out of use. 
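 *
 * Both the nr_hugepages sysctl handler and the per-hstate sysfs store
 * below end up in this function. A minimal userspace sketch (the proc
 * path assumes the conventional sysctl wiring and the value is only an
 * example):
 *
 *	int fd = open("/proc/sys/vm/nr_hugepages", O_WRONLY);
 *	write(fd, "64", 2);
 *	close(fd);
 *
 * The same request can be made per page size through
 * /sys/kernel/mm/hugepages/hugepages-<size>kB/nr_hugepages.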
12957893d1d5SAdam Litke */ 1296a5516438SAndi Kleen min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 12976b0c880dSAdam Litke min_count = max(count, min_count); 12986ae11b27SLee Schermerhorn try_to_free_low(h, min_count, nodes_allowed); 1299a5516438SAndi Kleen while (min_count < persistent_huge_pages(h)) { 13006ae11b27SLee Schermerhorn if (!free_pool_huge_page(h, nodes_allowed, 0)) 13011da177e4SLinus Torvalds break; 13021da177e4SLinus Torvalds } 1303a5516438SAndi Kleen while (count < persistent_huge_pages(h)) { 13046ae11b27SLee Schermerhorn if (!adjust_pool_surplus(h, nodes_allowed, 1)) 13057893d1d5SAdam Litke break; 13067893d1d5SAdam Litke } 13077893d1d5SAdam Litke out: 1308a5516438SAndi Kleen ret = persistent_huge_pages(h); 13091da177e4SLinus Torvalds spin_unlock(&hugetlb_lock); 13107893d1d5SAdam Litke return ret; 13111da177e4SLinus Torvalds } 13121da177e4SLinus Torvalds 1313a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \ 1314a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 1315a3437870SNishanth Aravamudan 1316a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \ 1317a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = \ 1318a3437870SNishanth Aravamudan __ATTR(_name, 0644, _name##_show, _name##_store) 1319a3437870SNishanth Aravamudan 1320a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj; 1321a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 1322a3437870SNishanth Aravamudan 1323a3437870SNishanth Aravamudan static struct hstate *kobj_to_hstate(struct kobject *kobj) 1324a3437870SNishanth Aravamudan { 1325a3437870SNishanth Aravamudan int i; 1326a3437870SNishanth Aravamudan for (i = 0; i < HUGE_MAX_HSTATE; i++) 1327a3437870SNishanth Aravamudan if (hstate_kobjs[i] == kobj) 1328a3437870SNishanth Aravamudan return &hstates[i]; 1329a3437870SNishanth Aravamudan BUG(); 1330a3437870SNishanth Aravamudan return NULL; 1331a3437870SNishanth Aravamudan } 1332a3437870SNishanth Aravamudan 1333a3437870SNishanth Aravamudan static ssize_t nr_hugepages_show(struct kobject *kobj, 1334a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1335a3437870SNishanth Aravamudan { 1336a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1337a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_huge_pages); 1338a3437870SNishanth Aravamudan } 1339a3437870SNishanth Aravamudan static ssize_t nr_hugepages_store(struct kobject *kobj, 1340a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1341a3437870SNishanth Aravamudan { 1342a3437870SNishanth Aravamudan int err; 1343a3437870SNishanth Aravamudan unsigned long input; 1344a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1345a3437870SNishanth Aravamudan 1346a3437870SNishanth Aravamudan err = strict_strtoul(buf, 10, &input); 1347a3437870SNishanth Aravamudan if (err) 1348a3437870SNishanth Aravamudan return 0; 1349a3437870SNishanth Aravamudan 13506ae11b27SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, input, &node_online_map); 1351a3437870SNishanth Aravamudan 1352a3437870SNishanth Aravamudan return count; 1353a3437870SNishanth Aravamudan } 1354a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages); 1355a3437870SNishanth Aravamudan 1356a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 1357a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1358a3437870SNishanth 
Aravamudan { 1359a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1360a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 1361a3437870SNishanth Aravamudan } 1362a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 1363a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1364a3437870SNishanth Aravamudan { 1365a3437870SNishanth Aravamudan int err; 1366a3437870SNishanth Aravamudan unsigned long input; 1367a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1368a3437870SNishanth Aravamudan 1369a3437870SNishanth Aravamudan err = strict_strtoul(buf, 10, &input); 1370a3437870SNishanth Aravamudan if (err) 1371a3437870SNishanth Aravamudan return 0; 1372a3437870SNishanth Aravamudan 1373a3437870SNishanth Aravamudan spin_lock(&hugetlb_lock); 1374a3437870SNishanth Aravamudan h->nr_overcommit_huge_pages = input; 1375a3437870SNishanth Aravamudan spin_unlock(&hugetlb_lock); 1376a3437870SNishanth Aravamudan 1377a3437870SNishanth Aravamudan return count; 1378a3437870SNishanth Aravamudan } 1379a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages); 1380a3437870SNishanth Aravamudan 1381a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj, 1382a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1383a3437870SNishanth Aravamudan { 1384a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1385a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->free_huge_pages); 1386a3437870SNishanth Aravamudan } 1387a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages); 1388a3437870SNishanth Aravamudan 1389a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj, 1390a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1391a3437870SNishanth Aravamudan { 1392a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1393a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->resv_huge_pages); 1394a3437870SNishanth Aravamudan } 1395a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages); 1396a3437870SNishanth Aravamudan 1397a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj, 1398a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1399a3437870SNishanth Aravamudan { 1400a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1401a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->surplus_huge_pages); 1402a3437870SNishanth Aravamudan } 1403a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages); 1404a3437870SNishanth Aravamudan 1405a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = { 1406a3437870SNishanth Aravamudan &nr_hugepages_attr.attr, 1407a3437870SNishanth Aravamudan &nr_overcommit_hugepages_attr.attr, 1408a3437870SNishanth Aravamudan &free_hugepages_attr.attr, 1409a3437870SNishanth Aravamudan &resv_hugepages_attr.attr, 1410a3437870SNishanth Aravamudan &surplus_hugepages_attr.attr, 1411a3437870SNishanth Aravamudan NULL, 1412a3437870SNishanth Aravamudan }; 1413a3437870SNishanth Aravamudan 1414a3437870SNishanth Aravamudan static struct attribute_group hstate_attr_group = { 1415a3437870SNishanth Aravamudan .attrs = hstate_attrs, 1416a3437870SNishanth Aravamudan }; 1417a3437870SNishanth Aravamudan 1418a3437870SNishanth Aravamudan static int __init hugetlb_sysfs_add_hstate(struct hstate *h) 1419a3437870SNishanth Aravamudan { 
1420a3437870SNishanth Aravamudan int retval; 1421a3437870SNishanth Aravamudan 1422a3437870SNishanth Aravamudan hstate_kobjs[h - hstates] = kobject_create_and_add(h->name, 1423a3437870SNishanth Aravamudan hugepages_kobj); 1424a3437870SNishanth Aravamudan if (!hstate_kobjs[h - hstates]) 1425a3437870SNishanth Aravamudan return -ENOMEM; 1426a3437870SNishanth Aravamudan 1427a3437870SNishanth Aravamudan retval = sysfs_create_group(hstate_kobjs[h - hstates], 1428a3437870SNishanth Aravamudan &hstate_attr_group); 1429a3437870SNishanth Aravamudan if (retval) 1430a3437870SNishanth Aravamudan kobject_put(hstate_kobjs[h - hstates]); 1431a3437870SNishanth Aravamudan 1432a3437870SNishanth Aravamudan return retval; 1433a3437870SNishanth Aravamudan } 1434a3437870SNishanth Aravamudan 1435a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void) 1436a3437870SNishanth Aravamudan { 1437a3437870SNishanth Aravamudan struct hstate *h; 1438a3437870SNishanth Aravamudan int err; 1439a3437870SNishanth Aravamudan 1440a3437870SNishanth Aravamudan hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 1441a3437870SNishanth Aravamudan if (!hugepages_kobj) 1442a3437870SNishanth Aravamudan return; 1443a3437870SNishanth Aravamudan 1444a3437870SNishanth Aravamudan for_each_hstate(h) { 1445a3437870SNishanth Aravamudan err = hugetlb_sysfs_add_hstate(h); 1446a3437870SNishanth Aravamudan if (err) 1447a3437870SNishanth Aravamudan printk(KERN_ERR "Hugetlb: Unable to add hstate %s", 1448a3437870SNishanth Aravamudan h->name); 1449a3437870SNishanth Aravamudan } 1450a3437870SNishanth Aravamudan } 1451a3437870SNishanth Aravamudan 1452a3437870SNishanth Aravamudan static void __exit hugetlb_exit(void) 1453a3437870SNishanth Aravamudan { 1454a3437870SNishanth Aravamudan struct hstate *h; 1455a3437870SNishanth Aravamudan 1456a3437870SNishanth Aravamudan for_each_hstate(h) { 1457a3437870SNishanth Aravamudan kobject_put(hstate_kobjs[h - hstates]); 1458a3437870SNishanth Aravamudan } 1459a3437870SNishanth Aravamudan 1460a3437870SNishanth Aravamudan kobject_put(hugepages_kobj); 1461a3437870SNishanth Aravamudan } 1462a3437870SNishanth Aravamudan module_exit(hugetlb_exit); 1463a3437870SNishanth Aravamudan 1464a3437870SNishanth Aravamudan static int __init hugetlb_init(void) 1465a3437870SNishanth Aravamudan { 14660ef89d25SBenjamin Herrenschmidt /* Some platform decide whether they support huge pages at boot 14670ef89d25SBenjamin Herrenschmidt * time. 
On these, such as powerpc, HPAGE_SHIFT is set to 0 when 14680ef89d25SBenjamin Herrenschmidt * there is no such support 14690ef89d25SBenjamin Herrenschmidt */ 14700ef89d25SBenjamin Herrenschmidt if (HPAGE_SHIFT == 0) 14710ef89d25SBenjamin Herrenschmidt return 0; 1472a3437870SNishanth Aravamudan 1473e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) { 1474e11bfbfcSNick Piggin default_hstate_size = HPAGE_SIZE; 1475e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) 1476a3437870SNishanth Aravamudan hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 1477a3437870SNishanth Aravamudan } 1478e11bfbfcSNick Piggin default_hstate_idx = size_to_hstate(default_hstate_size) - hstates; 1479e11bfbfcSNick Piggin if (default_hstate_max_huge_pages) 1480e11bfbfcSNick Piggin default_hstate.max_huge_pages = default_hstate_max_huge_pages; 1481a3437870SNishanth Aravamudan 1482a3437870SNishanth Aravamudan hugetlb_init_hstates(); 1483a3437870SNishanth Aravamudan 1484aa888a74SAndi Kleen gather_bootmem_prealloc(); 1485aa888a74SAndi Kleen 1486a3437870SNishanth Aravamudan report_hugepages(); 1487a3437870SNishanth Aravamudan 1488a3437870SNishanth Aravamudan hugetlb_sysfs_init(); 1489a3437870SNishanth Aravamudan 1490a3437870SNishanth Aravamudan return 0; 1491a3437870SNishanth Aravamudan } 1492a3437870SNishanth Aravamudan module_init(hugetlb_init); 1493a3437870SNishanth Aravamudan 1494a3437870SNishanth Aravamudan /* Should be called on processing a hugepagesz=... option */ 1495a3437870SNishanth Aravamudan void __init hugetlb_add_hstate(unsigned order) 1496a3437870SNishanth Aravamudan { 1497a3437870SNishanth Aravamudan struct hstate *h; 14988faa8b07SAndi Kleen unsigned long i; 14998faa8b07SAndi Kleen 1500a3437870SNishanth Aravamudan if (size_to_hstate(PAGE_SIZE << order)) { 1501a3437870SNishanth Aravamudan printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n"); 1502a3437870SNishanth Aravamudan return; 1503a3437870SNishanth Aravamudan } 1504a3437870SNishanth Aravamudan BUG_ON(max_hstate >= HUGE_MAX_HSTATE); 1505a3437870SNishanth Aravamudan BUG_ON(order == 0); 1506a3437870SNishanth Aravamudan h = &hstates[max_hstate++]; 1507a3437870SNishanth Aravamudan h->order = order; 1508a3437870SNishanth Aravamudan h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 15098faa8b07SAndi Kleen h->nr_huge_pages = 0; 15108faa8b07SAndi Kleen h->free_huge_pages = 0; 15118faa8b07SAndi Kleen for (i = 0; i < MAX_NUMNODES; ++i) 15128faa8b07SAndi Kleen INIT_LIST_HEAD(&h->hugepage_freelists[i]); 1513e8c5c824SLee Schermerhorn h->next_nid_to_alloc = first_node(node_online_map); 1514e8c5c824SLee Schermerhorn h->next_nid_to_free = first_node(node_online_map); 1515a3437870SNishanth Aravamudan snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 1516a3437870SNishanth Aravamudan huge_page_size(h)/1024); 15178faa8b07SAndi Kleen 1518a3437870SNishanth Aravamudan parsed_hstate = h; 1519a3437870SNishanth Aravamudan } 1520a3437870SNishanth Aravamudan 1521e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s) 1522a3437870SNishanth Aravamudan { 1523a3437870SNishanth Aravamudan unsigned long *mhp; 15248faa8b07SAndi Kleen static unsigned long *last_mhp; 1525a3437870SNishanth Aravamudan 1526a3437870SNishanth Aravamudan /* 1527a3437870SNishanth Aravamudan * !max_hstate means we haven't parsed a hugepagesz= parameter yet, 1528a3437870SNishanth Aravamudan * so this hugepages= parameter goes to the "default hstate". 
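 *
 * For example, with a command line such as (sizes illustrative and
 * architecture dependent):
 *
 *	hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * the first hugepages= count is stored into the 1G hstate and the second
 * into the 2M hstate, while a bare hugepages=N seen before any
 * hugepagesz= lands in default_hstate_max_huge_pages and is applied to
 * the default hstate by hugetlb_init().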
1529a3437870SNishanth Aravamudan */ 1530a3437870SNishanth Aravamudan if (!max_hstate) 1531a3437870SNishanth Aravamudan mhp = &default_hstate_max_huge_pages; 1532a3437870SNishanth Aravamudan else 1533a3437870SNishanth Aravamudan mhp = &parsed_hstate->max_huge_pages; 1534a3437870SNishanth Aravamudan 15358faa8b07SAndi Kleen if (mhp == last_mhp) { 15368faa8b07SAndi Kleen printk(KERN_WARNING "hugepages= specified twice without " 15378faa8b07SAndi Kleen "interleaving hugepagesz=, ignoring\n"); 15388faa8b07SAndi Kleen return 1; 15398faa8b07SAndi Kleen } 15408faa8b07SAndi Kleen 1541a3437870SNishanth Aravamudan if (sscanf(s, "%lu", mhp) <= 0) 1542a3437870SNishanth Aravamudan *mhp = 0; 1543a3437870SNishanth Aravamudan 15448faa8b07SAndi Kleen /* 15458faa8b07SAndi Kleen * Global state is always initialized later in hugetlb_init. 15468faa8b07SAndi Kleen * But we need to allocate >= MAX_ORDER hstates here early to still 15478faa8b07SAndi Kleen * use the bootmem allocator. 15488faa8b07SAndi Kleen */ 15498faa8b07SAndi Kleen if (max_hstate && parsed_hstate->order >= MAX_ORDER) 15508faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(parsed_hstate); 15518faa8b07SAndi Kleen 15528faa8b07SAndi Kleen last_mhp = mhp; 15538faa8b07SAndi Kleen 1554a3437870SNishanth Aravamudan return 1; 1555a3437870SNishanth Aravamudan } 1556e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup); 1557e11bfbfcSNick Piggin 1558e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s) 1559e11bfbfcSNick Piggin { 1560e11bfbfcSNick Piggin default_hstate_size = memparse(s, &s); 1561e11bfbfcSNick Piggin return 1; 1562e11bfbfcSNick Piggin } 1563e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup); 1564a3437870SNishanth Aravamudan 15658a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array) 15668a213460SNishanth Aravamudan { 15678a213460SNishanth Aravamudan int node; 15688a213460SNishanth Aravamudan unsigned int nr = 0; 15698a213460SNishanth Aravamudan 15708a213460SNishanth Aravamudan for_each_node_mask(node, cpuset_current_mems_allowed) 15718a213460SNishanth Aravamudan nr += array[node]; 15728a213460SNishanth Aravamudan 15738a213460SNishanth Aravamudan return nr; 15748a213460SNishanth Aravamudan } 15758a213460SNishanth Aravamudan 15768a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL 15771da177e4SLinus Torvalds int hugetlb_sysctl_handler(struct ctl_table *table, int write, 15788d65af78SAlexey Dobriyan void __user *buffer, 15791da177e4SLinus Torvalds size_t *length, loff_t *ppos) 15801da177e4SLinus Torvalds { 1581e5ff2159SAndi Kleen struct hstate *h = &default_hstate; 1582e5ff2159SAndi Kleen unsigned long tmp; 1583e5ff2159SAndi Kleen 1584e5ff2159SAndi Kleen if (!write) 1585e5ff2159SAndi Kleen tmp = h->max_huge_pages; 1586e5ff2159SAndi Kleen 1587e5ff2159SAndi Kleen table->data = &tmp; 1588e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 15898d65af78SAlexey Dobriyan proc_doulongvec_minmax(table, write, buffer, length, ppos); 1590e5ff2159SAndi Kleen 1591e5ff2159SAndi Kleen if (write) 15926ae11b27SLee Schermerhorn h->max_huge_pages = set_max_huge_pages(h, tmp, 15936ae11b27SLee Schermerhorn &node_online_map); 1594e5ff2159SAndi Kleen 15951da177e4SLinus Torvalds return 0; 15961da177e4SLinus Torvalds } 1597396faf03SMel Gorman 1598396faf03SMel Gorman int hugetlb_treat_movable_handler(struct ctl_table *table, int write, 15998d65af78SAlexey Dobriyan void __user *buffer, 1600396faf03SMel Gorman size_t *length, loff_t *ppos) 1601396faf03SMel Gorman { 16028d65af78SAlexey Dobriyan 
proc_dointvec(table, write, buffer, length, ppos); 1603396faf03SMel Gorman if (hugepages_treat_as_movable) 1604396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER_MOVABLE; 1605396faf03SMel Gorman else 1606396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER; 1607396faf03SMel Gorman return 0; 1608396faf03SMel Gorman } 1609396faf03SMel Gorman 1610a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write, 16118d65af78SAlexey Dobriyan void __user *buffer, 1612a3d0c6aaSNishanth Aravamudan size_t *length, loff_t *ppos) 1613a3d0c6aaSNishanth Aravamudan { 1614a5516438SAndi Kleen struct hstate *h = &default_hstate; 1615e5ff2159SAndi Kleen unsigned long tmp; 1616e5ff2159SAndi Kleen 1617e5ff2159SAndi Kleen if (!write) 1618e5ff2159SAndi Kleen tmp = h->nr_overcommit_huge_pages; 1619e5ff2159SAndi Kleen 1620e5ff2159SAndi Kleen table->data = &tmp; 1621e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 16228d65af78SAlexey Dobriyan proc_doulongvec_minmax(table, write, buffer, length, ppos); 1623e5ff2159SAndi Kleen 1624e5ff2159SAndi Kleen if (write) { 1625064d9efeSNishanth Aravamudan spin_lock(&hugetlb_lock); 1626e5ff2159SAndi Kleen h->nr_overcommit_huge_pages = tmp; 1627a3d0c6aaSNishanth Aravamudan spin_unlock(&hugetlb_lock); 1628e5ff2159SAndi Kleen } 1629e5ff2159SAndi Kleen 1630a3d0c6aaSNishanth Aravamudan return 0; 1631a3d0c6aaSNishanth Aravamudan } 1632a3d0c6aaSNishanth Aravamudan 16331da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 16341da177e4SLinus Torvalds 1635e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m) 16361da177e4SLinus Torvalds { 1637a5516438SAndi Kleen struct hstate *h = &default_hstate; 1638e1759c21SAlexey Dobriyan seq_printf(m, 16391da177e4SLinus Torvalds "HugePages_Total: %5lu\n" 16401da177e4SLinus Torvalds "HugePages_Free: %5lu\n" 1641b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n" 16427893d1d5SAdam Litke "HugePages_Surp: %5lu\n" 16434f98a2feSRik van Riel "Hugepagesize: %8lu kB\n", 1644a5516438SAndi Kleen h->nr_huge_pages, 1645a5516438SAndi Kleen h->free_huge_pages, 1646a5516438SAndi Kleen h->resv_huge_pages, 1647a5516438SAndi Kleen h->surplus_huge_pages, 1648a5516438SAndi Kleen 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 16491da177e4SLinus Torvalds } 16501da177e4SLinus Torvalds 16511da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf) 16521da177e4SLinus Torvalds { 1653a5516438SAndi Kleen struct hstate *h = &default_hstate; 16541da177e4SLinus Torvalds return sprintf(buf, 16551da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n" 1656a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n" 1657a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n", 1658a5516438SAndi Kleen nid, h->nr_huge_pages_node[nid], 1659a5516438SAndi Kleen nid, h->free_huge_pages_node[nid], 1660a5516438SAndi Kleen nid, h->surplus_huge_pages_node[nid]); 16611da177e4SLinus Torvalds } 16621da177e4SLinus Torvalds 16631da177e4SLinus Torvalds /* Return the number pages of memory we physically have, in PAGE_SIZE units. 
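 *
 * For example (illustrative, assuming 4 kB base pages and a 2 MB huge
 * page size): a pool of 64 huge pages makes this return 64 * 512 = 32768.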
*/ 16641da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void) 16651da177e4SLinus Torvalds { 1666a5516438SAndi Kleen struct hstate *h = &default_hstate; 1667a5516438SAndi Kleen return h->nr_huge_pages * pages_per_huge_page(h); 16681da177e4SLinus Torvalds } 16691da177e4SLinus Torvalds 1670a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta) 1671fc1b8a73SMel Gorman { 1672fc1b8a73SMel Gorman int ret = -ENOMEM; 1673fc1b8a73SMel Gorman 1674fc1b8a73SMel Gorman spin_lock(&hugetlb_lock); 1675fc1b8a73SMel Gorman /* 1676fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page 1677fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such 1678fc1b8a73SMel Gorman * reservation is completely rubbish in the presence of cpuset because 1679fc1b8a73SMel Gorman * the reservation is not checked against page availability for the 1680fc1b8a73SMel Gorman * current cpuset. Applications can still potentially be OOM'ed by the kernel 1681fc1b8a73SMel Gorman * for lack of a free htlb page in the cpuset that the task is in. 1682fc1b8a73SMel Gorman * Attempting to enforce strict accounting with cpuset is almost 1683fc1b8a73SMel Gorman * impossible (or too ugly) because cpuset is so fluid that 1684fc1b8a73SMel Gorman * tasks or memory nodes can be dynamically moved between cpusets. 1685fc1b8a73SMel Gorman * 1686fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mapping with cpuset is 1687fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics, 1688fc1b8a73SMel Gorman * we fall back to check against current free page availability as 1689fc1b8a73SMel Gorman * a best attempt and hopefully to minimize the impact of changing 1690fc1b8a73SMel Gorman * semantics that cpuset has. 1691fc1b8a73SMel Gorman */ 1692fc1b8a73SMel Gorman if (delta > 0) { 1693a5516438SAndi Kleen if (gather_surplus_pages(h, delta) < 0) 1694fc1b8a73SMel Gorman goto out; 1695fc1b8a73SMel Gorman 1696a5516438SAndi Kleen if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { 1697a5516438SAndi Kleen return_unused_surplus_pages(h, delta); 1698fc1b8a73SMel Gorman goto out; 1699fc1b8a73SMel Gorman } 1700fc1b8a73SMel Gorman } 1701fc1b8a73SMel Gorman 1702fc1b8a73SMel Gorman ret = 0; 1703fc1b8a73SMel Gorman if (delta < 0) 1704a5516438SAndi Kleen return_unused_surplus_pages(h, (unsigned long) -delta); 1705fc1b8a73SMel Gorman 1706fc1b8a73SMel Gorman out: 1707fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock); 1708fc1b8a73SMel Gorman return ret; 1709fc1b8a73SMel Gorman } 1710fc1b8a73SMel Gorman 171184afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma) 171284afd99bSAndy Whitcroft { 171384afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 171484afd99bSAndy Whitcroft 171584afd99bSAndy Whitcroft /* 171684afd99bSAndy Whitcroft * This new VMA should share its sibling's reservation map if present. 171784afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where 171884afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA 171984afd99bSAndy Whitcroft * has a reference to the reservation map it cannot disappear until 172084afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a 172184afd99bSAndy Whitcroft * new reference here without additional locking.
172284afd99bSAndy Whitcroft */ 172384afd99bSAndy Whitcroft if (reservations) 172484afd99bSAndy Whitcroft kref_get(&reservations->refs); 172584afd99bSAndy Whitcroft } 172684afd99bSAndy Whitcroft 1727a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma) 1728a1e78772SMel Gorman { 1729a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 173084afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 173184afd99bSAndy Whitcroft unsigned long reserve; 173284afd99bSAndy Whitcroft unsigned long start; 173384afd99bSAndy Whitcroft unsigned long end; 173484afd99bSAndy Whitcroft 173584afd99bSAndy Whitcroft if (reservations) { 1736a5516438SAndi Kleen start = vma_hugecache_offset(h, vma, vma->vm_start); 1737a5516438SAndi Kleen end = vma_hugecache_offset(h, vma, vma->vm_end); 173884afd99bSAndy Whitcroft 173984afd99bSAndy Whitcroft reserve = (end - start) - 174084afd99bSAndy Whitcroft region_count(&reservations->regions, start, end); 174184afd99bSAndy Whitcroft 174284afd99bSAndy Whitcroft kref_put(&reservations->refs, resv_map_release); 174384afd99bSAndy Whitcroft 17447251ff78SAdam Litke if (reserve) { 1745a5516438SAndi Kleen hugetlb_acct_memory(h, -reserve); 17467251ff78SAdam Litke hugetlb_put_quota(vma->vm_file->f_mapping, reserve); 17477251ff78SAdam Litke } 1748a1e78772SMel Gorman } 174984afd99bSAndy Whitcroft } 1750a1e78772SMel Gorman 17511da177e4SLinus Torvalds /* 17521da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. They cause 17531da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the 17541da177e4SLinus Torvalds * hugegpage VMA. do_page_fault() is supposed to trap this, so BUG is we get 17551da177e4SLinus Torvalds * this far. 17561da177e4SLinus Torvalds */ 1757d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 17581da177e4SLinus Torvalds { 17591da177e4SLinus Torvalds BUG(); 1760d0217ac0SNick Piggin return 0; 17611da177e4SLinus Torvalds } 17621da177e4SLinus Torvalds 1763f0f37e2fSAlexey Dobriyan const struct vm_operations_struct hugetlb_vm_ops = { 1764d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 176584afd99bSAndy Whitcroft .open = hugetlb_vm_op_open, 1766a1e78772SMel Gorman .close = hugetlb_vm_op_close, 17671da177e4SLinus Torvalds }; 17681da177e4SLinus Torvalds 17691e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 17701e8f889bSDavid Gibson int writable) 177163551ae0SDavid Gibson { 177263551ae0SDavid Gibson pte_t entry; 177363551ae0SDavid Gibson 17741e8f889bSDavid Gibson if (writable) { 177563551ae0SDavid Gibson entry = 177663551ae0SDavid Gibson pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); 177763551ae0SDavid Gibson } else { 17787f2e9525SGerald Schaefer entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot)); 177963551ae0SDavid Gibson } 178063551ae0SDavid Gibson entry = pte_mkyoung(entry); 178163551ae0SDavid Gibson entry = pte_mkhuge(entry); 178263551ae0SDavid Gibson 178363551ae0SDavid Gibson return entry; 178463551ae0SDavid Gibson } 178563551ae0SDavid Gibson 17861e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 17871e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 17881e8f889bSDavid Gibson { 17891e8f889bSDavid Gibson pte_t entry; 17901e8f889bSDavid Gibson 17917f2e9525SGerald Schaefer entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); 17927f2e9525SGerald Schaefer if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 
{ 17931e8f889bSDavid Gibson update_mmu_cache(vma, address, entry); 17941e8f889bSDavid Gibson } 17958dab5241SBenjamin Herrenschmidt } 17961e8f889bSDavid Gibson 17971e8f889bSDavid Gibson 179863551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 179963551ae0SDavid Gibson struct vm_area_struct *vma) 180063551ae0SDavid Gibson { 180163551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 180263551ae0SDavid Gibson struct page *ptepage; 18031c59827dSHugh Dickins unsigned long addr; 18041e8f889bSDavid Gibson int cow; 1805a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1806a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 18071e8f889bSDavid Gibson 18081e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 180963551ae0SDavid Gibson 1810a5516438SAndi Kleen for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 1811c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 1812c74df32cSHugh Dickins if (!src_pte) 1813c74df32cSHugh Dickins continue; 1814a5516438SAndi Kleen dst_pte = huge_pte_alloc(dst, addr, sz); 181563551ae0SDavid Gibson if (!dst_pte) 181663551ae0SDavid Gibson goto nomem; 1817c5c99429SLarry Woodman 1818c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 1819c5c99429SLarry Woodman if (dst_pte == src_pte) 1820c5c99429SLarry Woodman continue; 1821c5c99429SLarry Woodman 1822c74df32cSHugh Dickins spin_lock(&dst->page_table_lock); 182346478758SNick Piggin spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); 18247f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(src_pte))) { 18251e8f889bSDavid Gibson if (cow) 18267f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte); 18277f2e9525SGerald Schaefer entry = huge_ptep_get(src_pte); 182863551ae0SDavid Gibson ptepage = pte_page(entry); 182963551ae0SDavid Gibson get_page(ptepage); 183063551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 18311c59827dSHugh Dickins } 18321c59827dSHugh Dickins spin_unlock(&src->page_table_lock); 1833c74df32cSHugh Dickins spin_unlock(&dst->page_table_lock); 183463551ae0SDavid Gibson } 183563551ae0SDavid Gibson return 0; 183663551ae0SDavid Gibson 183763551ae0SDavid Gibson nomem: 183863551ae0SDavid Gibson return -ENOMEM; 183963551ae0SDavid Gibson } 184063551ae0SDavid Gibson 1841502717f4SChen, Kenneth W void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 184204f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 184363551ae0SDavid Gibson { 184463551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 184563551ae0SDavid Gibson unsigned long address; 1846c7546f8fSDavid Gibson pte_t *ptep; 184763551ae0SDavid Gibson pte_t pte; 184863551ae0SDavid Gibson struct page *page; 1849fe1668aeSChen, Kenneth W struct page *tmp; 1850a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1851a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 1852a5516438SAndi Kleen 1853c0a499c2SChen, Kenneth W /* 1854c0a499c2SChen, Kenneth W * A page gathering list, protected by per file i_mmap_lock. The 1855c0a499c2SChen, Kenneth W * lock is used to avoid list corruption from multiple unmapping 1856c0a499c2SChen, Kenneth W * of the same page since we are using page->lru. 
1857c0a499c2SChen, Kenneth W */ 1858fe1668aeSChen, Kenneth W LIST_HEAD(page_list); 185963551ae0SDavid Gibson 186063551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma)); 1861a5516438SAndi Kleen BUG_ON(start & ~huge_page_mask(h)); 1862a5516438SAndi Kleen BUG_ON(end & ~huge_page_mask(h)); 186363551ae0SDavid Gibson 1864cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_start(mm, start, end); 1865508034a3SHugh Dickins spin_lock(&mm->page_table_lock); 1866a5516438SAndi Kleen for (address = start; address < end; address += sz) { 1867c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address); 1868c7546f8fSDavid Gibson if (!ptep) 1869c7546f8fSDavid Gibson continue; 1870c7546f8fSDavid Gibson 187139dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 187239dde65cSChen, Kenneth W continue; 187339dde65cSChen, Kenneth W 187404f2cbe3SMel Gorman /* 187504f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific 187604f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we 187704f2cbe3SMel Gorman * are about to unmap is the actual page of interest. 187804f2cbe3SMel Gorman */ 187904f2cbe3SMel Gorman if (ref_page) { 188004f2cbe3SMel Gorman pte = huge_ptep_get(ptep); 188104f2cbe3SMel Gorman if (huge_pte_none(pte)) 188204f2cbe3SMel Gorman continue; 188304f2cbe3SMel Gorman page = pte_page(pte); 188404f2cbe3SMel Gorman if (page != ref_page) 188504f2cbe3SMel Gorman continue; 188604f2cbe3SMel Gorman 188704f2cbe3SMel Gorman /* 188804f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that 188904f2cbe3SMel Gorman * future faults in this VMA will fail rather than 189004f2cbe3SMel Gorman * looking like data was lost 189104f2cbe3SMel Gorman */ 189204f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 189304f2cbe3SMel Gorman } 189404f2cbe3SMel Gorman 1895c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep); 18967f2e9525SGerald Schaefer if (huge_pte_none(pte)) 189763551ae0SDavid Gibson continue; 1898c7546f8fSDavid Gibson 189963551ae0SDavid Gibson page = pte_page(pte); 19006649a386SKen Chen if (pte_dirty(pte)) 19016649a386SKen Chen set_page_dirty(page); 1902fe1668aeSChen, Kenneth W list_add(&page->lru, &page_list); 190363551ae0SDavid Gibson } 19041da177e4SLinus Torvalds spin_unlock(&mm->page_table_lock); 1905508034a3SHugh Dickins flush_tlb_range(vma, start, end); 1906cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_end(mm, start, end); 1907fe1668aeSChen, Kenneth W list_for_each_entry_safe(page, tmp, &page_list, lru) { 1908fe1668aeSChen, Kenneth W list_del(&page->lru); 1909fe1668aeSChen, Kenneth W put_page(page); 1910fe1668aeSChen, Kenneth W } 19111da177e4SLinus Torvalds } 191263551ae0SDavid Gibson 1913502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 191404f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 1915502717f4SChen, Kenneth W { 1916502717f4SChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 191704f2cbe3SMel Gorman __unmap_hugepage_range(vma, start, end, ref_page); 1918502717f4SChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 1919502717f4SChen, Kenneth W } 1920502717f4SChen, Kenneth W 192104f2cbe3SMel Gorman /* 192204f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE 192304f2cbe3SMel Gorman * mappping it owns the reserve page for. 
The intention is to unmap the page 192404f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the 192504f2cbe3SMel Gorman * same region. 192604f2cbe3SMel Gorman */ 19272a4b3dedSHarvey Harrison static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 19282a4b3dedSHarvey Harrison struct page *page, unsigned long address) 192904f2cbe3SMel Gorman { 19307526674dSAdam Litke struct hstate *h = hstate_vma(vma); 193104f2cbe3SMel Gorman struct vm_area_struct *iter_vma; 193204f2cbe3SMel Gorman struct address_space *mapping; 193304f2cbe3SMel Gorman struct prio_tree_iter iter; 193404f2cbe3SMel Gorman pgoff_t pgoff; 193504f2cbe3SMel Gorman 193604f2cbe3SMel Gorman /* 193704f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation 193804f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units. 193904f2cbe3SMel Gorman */ 19407526674dSAdam Litke address = address & huge_page_mask(h); 194104f2cbe3SMel Gorman pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) 194204f2cbe3SMel Gorman + (vma->vm_pgoff >> PAGE_SHIFT); 194304f2cbe3SMel Gorman mapping = (struct address_space *)page_private(page); 194404f2cbe3SMel Gorman 194504f2cbe3SMel Gorman vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 194604f2cbe3SMel Gorman /* Do not unmap the current VMA */ 194704f2cbe3SMel Gorman if (iter_vma == vma) 194804f2cbe3SMel Gorman continue; 194904f2cbe3SMel Gorman 195004f2cbe3SMel Gorman /* 195104f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves. 195204f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these 195304f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA 195404f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing 195504f2cbe3SMel Gorman * from the time of fork. 
This would look like data corruption 195604f2cbe3SMel Gorman */ 195704f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 195804f2cbe3SMel Gorman unmap_hugepage_range(iter_vma, 19597526674dSAdam Litke address, address + huge_page_size(h), 196004f2cbe3SMel Gorman page); 196104f2cbe3SMel Gorman } 196204f2cbe3SMel Gorman 196304f2cbe3SMel Gorman return 1; 196404f2cbe3SMel Gorman } 196504f2cbe3SMel Gorman 19661e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 196704f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte, 196804f2cbe3SMel Gorman struct page *pagecache_page) 19691e8f889bSDavid Gibson { 1970a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 19711e8f889bSDavid Gibson struct page *old_page, *new_page; 197279ac6ba4SDavid Gibson int avoidcopy; 197304f2cbe3SMel Gorman int outside_reserve = 0; 19741e8f889bSDavid Gibson 19751e8f889bSDavid Gibson old_page = pte_page(pte); 19761e8f889bSDavid Gibson 197704f2cbe3SMel Gorman retry_avoidcopy: 19781e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy 19791e8f889bSDavid Gibson * and just make the page writable */ 19801e8f889bSDavid Gibson avoidcopy = (page_count(old_page) == 1); 19811e8f889bSDavid Gibson if (avoidcopy) { 19821e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep); 198383c54070SNick Piggin return 0; 19841e8f889bSDavid Gibson } 19851e8f889bSDavid Gibson 198604f2cbe3SMel Gorman /* 198704f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to 198804f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy 198904f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache 199004f2cbe3SMel Gorman * page is used to determine if the reserve at this address was 199104f2cbe3SMel Gorman * consumed or not. If reserves were used, a partial faulted mapping 199204f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead 199304f2cbe3SMel Gorman * of the full address range. 199404f2cbe3SMel Gorman */ 1995f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE) && 199604f2cbe3SMel Gorman is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 199704f2cbe3SMel Gorman old_page != pagecache_page) 199804f2cbe3SMel Gorman outside_reserve = 1; 199904f2cbe3SMel Gorman 20001e8f889bSDavid Gibson page_cache_get(old_page); 200104f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve); 20021e8f889bSDavid Gibson 20032fc39cecSAdam Litke if (IS_ERR(new_page)) { 20041e8f889bSDavid Gibson page_cache_release(old_page); 200504f2cbe3SMel Gorman 200604f2cbe3SMel Gorman /* 200704f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW, 200804f2cbe3SMel Gorman * it is due to references held by a child and an insufficient 200904f2cbe3SMel Gorman * huge page pool. To guarantee the original mappers 201004f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child 201104f2cbe3SMel Gorman * may get SIGKILLed if it later faults. 
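 *
 * Illustrative sequence: a parent creates a MAP_PRIVATE hugetlb mapping
 * (and so owns the reserves) and forks; parent and child share the page
 * read-only. If the parent later writes while the pool cannot supply a
 * fresh huge page, it unmaps the page from the child via
 * unmap_ref_private() and retries; with the page no longer shared the
 * parent simply makes it writable, while the child's VMA is flagged
 * HPAGE_RESV_UNMAPPED and the child is killed if it faults on that
 * range again (see hugetlb_no_page() below).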
201204f2cbe3SMel Gorman */ 201304f2cbe3SMel Gorman if (outside_reserve) { 201404f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 201504f2cbe3SMel Gorman if (unmap_ref_private(mm, vma, old_page, address)) { 201604f2cbe3SMel Gorman BUG_ON(page_count(old_page) != 1); 201704f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 201804f2cbe3SMel Gorman goto retry_avoidcopy; 201904f2cbe3SMel Gorman } 202004f2cbe3SMel Gorman WARN_ON_ONCE(1); 202104f2cbe3SMel Gorman } 202204f2cbe3SMel Gorman 20232fc39cecSAdam Litke return -PTR_ERR(new_page); 20241e8f889bSDavid Gibson } 20251e8f889bSDavid Gibson 20261e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 20279de455b2SAtsushi Nemoto copy_huge_page(new_page, old_page, address, vma); 20280ed361deSNick Piggin __SetPageUptodate(new_page); 20291e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 20301e8f889bSDavid Gibson 2031a5516438SAndi Kleen ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 20327f2e9525SGerald Schaefer if (likely(pte_same(huge_ptep_get(ptep), pte))) { 20331e8f889bSDavid Gibson /* Break COW */ 20348fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep); 20351e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, 20361e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1)); 20371e8f889bSDavid Gibson /* Make the old page be freed below */ 20381e8f889bSDavid Gibson new_page = old_page; 20391e8f889bSDavid Gibson } 20401e8f889bSDavid Gibson page_cache_release(new_page); 20411e8f889bSDavid Gibson page_cache_release(old_page); 204283c54070SNick Piggin return 0; 20431e8f889bSDavid Gibson } 20441e8f889bSDavid Gibson 204504f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */ 2046a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h, 2047a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 204804f2cbe3SMel Gorman { 204904f2cbe3SMel Gorman struct address_space *mapping; 2050e7c4b0bfSAndy Whitcroft pgoff_t idx; 205104f2cbe3SMel Gorman 205204f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 2053a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 205404f2cbe3SMel Gorman 205504f2cbe3SMel Gorman return find_lock_page(mapping, idx); 205604f2cbe3SMel Gorman } 205704f2cbe3SMel Gorman 20583ae77f43SHugh Dickins /* 20593ae77f43SHugh Dickins * Return whether there is a pagecache page to back given address within VMA. 20603ae77f43SHugh Dickins * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 20613ae77f43SHugh Dickins */ 20623ae77f43SHugh Dickins static bool hugetlbfs_pagecache_present(struct hstate *h, 20632a15efc9SHugh Dickins struct vm_area_struct *vma, unsigned long address) 20642a15efc9SHugh Dickins { 20652a15efc9SHugh Dickins struct address_space *mapping; 20662a15efc9SHugh Dickins pgoff_t idx; 20672a15efc9SHugh Dickins struct page *page; 20682a15efc9SHugh Dickins 20692a15efc9SHugh Dickins mapping = vma->vm_file->f_mapping; 20702a15efc9SHugh Dickins idx = vma_hugecache_offset(h, vma, address); 20712a15efc9SHugh Dickins 20722a15efc9SHugh Dickins page = find_get_page(mapping, idx); 20732a15efc9SHugh Dickins if (page) 20742a15efc9SHugh Dickins put_page(page); 20752a15efc9SHugh Dickins return page != NULL; 20762a15efc9SHugh Dickins } 20772a15efc9SHugh Dickins 2078a1ed3ddaSRobert P. J. 
Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 2079788c7df4SHugh Dickins unsigned long address, pte_t *ptep, unsigned int flags) 2080ac9b9c66SHugh Dickins { 2081a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 2082ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 2083e7c4b0bfSAndy Whitcroft pgoff_t idx; 20844c887265SAdam Litke unsigned long size; 20854c887265SAdam Litke struct page *page; 20864c887265SAdam Litke struct address_space *mapping; 20871e8f889bSDavid Gibson pte_t new_pte; 20884c887265SAdam Litke 208904f2cbe3SMel Gorman /* 209004f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the 209104f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 209204f2cbe3SMel Gorman * COW. Warn that such a situation has occured as it may not be obvious 209304f2cbe3SMel Gorman */ 209404f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 209504f2cbe3SMel Gorman printk(KERN_WARNING 209604f2cbe3SMel Gorman "PID %d killed due to inadequate hugepage pool\n", 209704f2cbe3SMel Gorman current->pid); 209804f2cbe3SMel Gorman return ret; 209904f2cbe3SMel Gorman } 210004f2cbe3SMel Gorman 21014c887265SAdam Litke mapping = vma->vm_file->f_mapping; 2102a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 21034c887265SAdam Litke 21044c887265SAdam Litke /* 21054c887265SAdam Litke * Use page lock to guard against racing truncation 21064c887265SAdam Litke * before we get page_table_lock. 21074c887265SAdam Litke */ 21086bda666aSChristoph Lameter retry: 21096bda666aSChristoph Lameter page = find_lock_page(mapping, idx); 21106bda666aSChristoph Lameter if (!page) { 2111a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 2112ebed4bfcSHugh Dickins if (idx >= size) 2113ebed4bfcSHugh Dickins goto out; 211404f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0); 21152fc39cecSAdam Litke if (IS_ERR(page)) { 21162fc39cecSAdam Litke ret = -PTR_ERR(page); 21176bda666aSChristoph Lameter goto out; 21186bda666aSChristoph Lameter } 2119a5516438SAndi Kleen clear_huge_page(page, address, huge_page_size(h)); 21200ed361deSNick Piggin __SetPageUptodate(page); 2121ac9b9c66SHugh Dickins 2122f83a275dSMel Gorman if (vma->vm_flags & VM_MAYSHARE) { 21236bda666aSChristoph Lameter int err; 212445c682a6SKen Chen struct inode *inode = mapping->host; 21256bda666aSChristoph Lameter 21266bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 21276bda666aSChristoph Lameter if (err) { 21286bda666aSChristoph Lameter put_page(page); 21296bda666aSChristoph Lameter if (err == -EEXIST) 21306bda666aSChristoph Lameter goto retry; 21316bda666aSChristoph Lameter goto out; 21326bda666aSChristoph Lameter } 213345c682a6SKen Chen 213445c682a6SKen Chen spin_lock(&inode->i_lock); 2135a5516438SAndi Kleen inode->i_blocks += blocks_per_huge_page(h); 213645c682a6SKen Chen spin_unlock(&inode->i_lock); 21376bda666aSChristoph Lameter } else 21386bda666aSChristoph Lameter lock_page(page); 21396bda666aSChristoph Lameter } 21401e8f889bSDavid Gibson 214157303d80SAndy Whitcroft /* 214257303d80SAndy Whitcroft * If we are going to COW a private mapping later, we examine the 214357303d80SAndy Whitcroft * pending reservations for this page now. This will ensure that 214457303d80SAndy Whitcroft * any allocations necessary to record that reservation occur outside 214557303d80SAndy Whitcroft * the spinlock. 
214657303d80SAndy Whitcroft */ 2147788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) 21482b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 21492b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 21502b26736cSAndy Whitcroft goto backout_unlocked; 21512b26736cSAndy Whitcroft } 215257303d80SAndy Whitcroft 2153ac9b9c66SHugh Dickins spin_lock(&mm->page_table_lock); 2154a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 21554c887265SAdam Litke if (idx >= size) 21564c887265SAdam Litke goto backout; 21574c887265SAdam Litke 215883c54070SNick Piggin ret = 0; 21597f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) 21604c887265SAdam Litke goto backout; 21614c887265SAdam Litke 21621e8f889bSDavid Gibson new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 21631e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED))); 21641e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte); 21651e8f889bSDavid Gibson 2166788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 21671e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */ 216804f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 21691e8f889bSDavid Gibson } 21701e8f889bSDavid Gibson 2171ac9b9c66SHugh Dickins spin_unlock(&mm->page_table_lock); 21724c887265SAdam Litke unlock_page(page); 21734c887265SAdam Litke out: 2174ac9b9c66SHugh Dickins return ret; 21754c887265SAdam Litke 21764c887265SAdam Litke backout: 21774c887265SAdam Litke spin_unlock(&mm->page_table_lock); 21782b26736cSAndy Whitcroft backout_unlocked: 21794c887265SAdam Litke unlock_page(page); 21804c887265SAdam Litke put_page(page); 21814c887265SAdam Litke goto out; 2182ac9b9c66SHugh Dickins } 2183ac9b9c66SHugh Dickins 218486e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2185788c7df4SHugh Dickins unsigned long address, unsigned int flags) 218686e5216fSAdam Litke { 218786e5216fSAdam Litke pte_t *ptep; 218886e5216fSAdam Litke pte_t entry; 21891e8f889bSDavid Gibson int ret; 219057303d80SAndy Whitcroft struct page *pagecache_page = NULL; 21913935baa9SDavid Gibson static DEFINE_MUTEX(hugetlb_instantiation_mutex); 2192a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 219386e5216fSAdam Litke 2194a5516438SAndi Kleen ptep = huge_pte_alloc(mm, address, huge_page_size(h)); 219586e5216fSAdam Litke if (!ptep) 219686e5216fSAdam Litke return VM_FAULT_OOM; 219786e5216fSAdam Litke 21983935baa9SDavid Gibson /* 21993935baa9SDavid Gibson * Serialize hugepage allocation and instantiation, so that we don't 22003935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate 22013935baa9SDavid Gibson * the same page in the page cache. 22023935baa9SDavid Gibson */ 22033935baa9SDavid Gibson mutex_lock(&hugetlb_instantiation_mutex); 22047f2e9525SGerald Schaefer entry = huge_ptep_get(ptep); 22057f2e9525SGerald Schaefer if (huge_pte_none(entry)) { 2206788c7df4SHugh Dickins ret = hugetlb_no_page(mm, vma, address, ptep, flags); 2207b4d1d99fSDavid Gibson goto out_mutex; 22083935baa9SDavid Gibson } 220986e5216fSAdam Litke 221083c54070SNick Piggin ret = 0; 22111e8f889bSDavid Gibson 221257303d80SAndy Whitcroft /* 221357303d80SAndy Whitcroft * If we are going to COW the mapping later, we examine the pending 221457303d80SAndy Whitcroft * reservations for this page now. 
This will ensure that any 221557303d80SAndy Whitcroft * allocations necessary to record that reservation occur outside the 221657303d80SAndy Whitcroft * spinlock. For private mappings, we also lookup the pagecache 221757303d80SAndy Whitcroft * page now as it is used to determine if a reservation has been 221857303d80SAndy Whitcroft * consumed. 221957303d80SAndy Whitcroft */ 2220788c7df4SHugh Dickins if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) { 22212b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 22222b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 2223b4d1d99fSDavid Gibson goto out_mutex; 22242b26736cSAndy Whitcroft } 222557303d80SAndy Whitcroft 2226f83a275dSMel Gorman if (!(vma->vm_flags & VM_MAYSHARE)) 222757303d80SAndy Whitcroft pagecache_page = hugetlbfs_pagecache_page(h, 222857303d80SAndy Whitcroft vma, address); 222957303d80SAndy Whitcroft } 223057303d80SAndy Whitcroft 22311e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 22321e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */ 2233b4d1d99fSDavid Gibson if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 2234b4d1d99fSDavid Gibson goto out_page_table_lock; 2235b4d1d99fSDavid Gibson 2236b4d1d99fSDavid Gibson 2237788c7df4SHugh Dickins if (flags & FAULT_FLAG_WRITE) { 2238b4d1d99fSDavid Gibson if (!pte_write(entry)) { 223957303d80SAndy Whitcroft ret = hugetlb_cow(mm, vma, address, ptep, entry, 224057303d80SAndy Whitcroft pagecache_page); 2241b4d1d99fSDavid Gibson goto out_page_table_lock; 2242b4d1d99fSDavid Gibson } 2243b4d1d99fSDavid Gibson entry = pte_mkdirty(entry); 2244b4d1d99fSDavid Gibson } 2245b4d1d99fSDavid Gibson entry = pte_mkyoung(entry); 2246788c7df4SHugh Dickins if (huge_ptep_set_access_flags(vma, address, ptep, entry, 2247788c7df4SHugh Dickins flags & FAULT_FLAG_WRITE)) 2248b4d1d99fSDavid Gibson update_mmu_cache(vma, address, entry); 2249b4d1d99fSDavid Gibson 2250b4d1d99fSDavid Gibson out_page_table_lock: 22511e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 225257303d80SAndy Whitcroft 225357303d80SAndy Whitcroft if (pagecache_page) { 225457303d80SAndy Whitcroft unlock_page(pagecache_page); 225557303d80SAndy Whitcroft put_page(pagecache_page); 225657303d80SAndy Whitcroft } 225757303d80SAndy Whitcroft 2258b4d1d99fSDavid Gibson out_mutex: 22593935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 22601e8f889bSDavid Gibson 22611e8f889bSDavid Gibson return ret; 226286e5216fSAdam Litke } 226386e5216fSAdam Litke 2264ceb86879SAndi Kleen /* Can be overriden by architectures */ 2265ceb86879SAndi Kleen __attribute__((weak)) struct page * 2266ceb86879SAndi Kleen follow_huge_pud(struct mm_struct *mm, unsigned long address, 2267ceb86879SAndi Kleen pud_t *pud, int write) 2268ceb86879SAndi Kleen { 2269ceb86879SAndi Kleen BUG(); 2270ceb86879SAndi Kleen return NULL; 2271ceb86879SAndi Kleen } 2272ceb86879SAndi Kleen 227363551ae0SDavid Gibson int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 227463551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas, 22755b23dbe8SAdam Litke unsigned long *position, int *length, int i, 22762a15efc9SHugh Dickins unsigned int flags) 227763551ae0SDavid Gibson { 2278d5d4b0aaSChen, Kenneth W unsigned long pfn_offset; 2279d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position; 228063551ae0SDavid Gibson int remainder = *length; 2281a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 228263551ae0SDavid Gibson 22831c59827dSHugh Dickins spin_lock(&mm->page_table_lock); 228463551ae0SDavid Gibson 
228463551ae0SDavid Gibson 	while (vaddr < vma->vm_end && remainder) {
228563551ae0SDavid Gibson 		pte_t *pte;
22862a15efc9SHugh Dickins 		int absent;
228763551ae0SDavid Gibson 		struct page *page;
228863551ae0SDavid Gibson
22894c887265SAdam Litke 		/*
22904c887265SAdam Litke 		 * Some archs (sparc64, sh*) have multiple pte_ts to
22912a15efc9SHugh Dickins 		 * each hugepage. We have to make sure we get the
22924c887265SAdam Litke 		 * first, for the page indexing below to work.
22934c887265SAdam Litke 		 */
2294a5516438SAndi Kleen 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
22952a15efc9SHugh Dickins 		absent = !pte || huge_pte_none(huge_ptep_get(pte));
229663551ae0SDavid Gibson
22972a15efc9SHugh Dickins 		/*
22982a15efc9SHugh Dickins 		 * When coredumping, it suits get_dump_page if we just return
22993ae77f43SHugh Dickins 		 * an error where there's an empty slot with no huge pagecache
23003ae77f43SHugh Dickins 		 * to back it. This way, we avoid allocating a hugepage, and
23013ae77f43SHugh Dickins 		 * the sparse dumpfile avoids allocating disk blocks, but its
23023ae77f43SHugh Dickins 		 * huge holes still show up with zeroes where they need to be.
23032a15efc9SHugh Dickins 		 */
23043ae77f43SHugh Dickins 		if (absent && (flags & FOLL_DUMP) &&
23053ae77f43SHugh Dickins 		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
23062a15efc9SHugh Dickins 			remainder = 0;
23072a15efc9SHugh Dickins 			break;
23082a15efc9SHugh Dickins 		}
23092a15efc9SHugh Dickins
23102a15efc9SHugh Dickins 		if (absent ||
23112a15efc9SHugh Dickins 		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
23124c887265SAdam Litke 			int ret;
23134c887265SAdam Litke
23144c887265SAdam Litke 			spin_unlock(&mm->page_table_lock);
23152a15efc9SHugh Dickins 			ret = hugetlb_fault(mm, vma, vaddr,
23162a15efc9SHugh Dickins 				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
23174c887265SAdam Litke 			spin_lock(&mm->page_table_lock);
2318a89182c7SAdam Litke 			if (!(ret & VM_FAULT_ERROR))
23194c887265SAdam Litke 				continue;
23204c887265SAdam Litke
23211c59827dSHugh Dickins 			remainder = 0;
23221c59827dSHugh Dickins 			break;
23231c59827dSHugh Dickins 		}
232463551ae0SDavid Gibson
2325a5516438SAndi Kleen 		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
23267f2e9525SGerald Schaefer 		page = pte_page(huge_ptep_get(pte));
2327d5d4b0aaSChen, Kenneth W same_page:
2328d6692183SChen, Kenneth W 		if (pages) {
232969d177c2SAndy Whitcroft 			pages[i] = mem_map_offset(page, pfn_offset);
23304b2e38adSKOSAKI Motohiro 			get_page(pages[i]);
2331d6692183SChen, Kenneth W 		}
233263551ae0SDavid Gibson
233363551ae0SDavid Gibson 		if (vmas)
233463551ae0SDavid Gibson 			vmas[i] = vma;
233563551ae0SDavid Gibson
233663551ae0SDavid Gibson 		vaddr += PAGE_SIZE;
2337d5d4b0aaSChen, Kenneth W 		++pfn_offset;
233863551ae0SDavid Gibson 		--remainder;
233963551ae0SDavid Gibson 		++i;
2340d5d4b0aaSChen, Kenneth W 		if (vaddr < vma->vm_end && remainder &&
2341a5516438SAndi Kleen 		    pfn_offset < pages_per_huge_page(h)) {
2342d5d4b0aaSChen, Kenneth W 			/*
2343d5d4b0aaSChen, Kenneth W 			 * We use pfn_offset to avoid touching the pageframes
2344d5d4b0aaSChen, Kenneth W 			 * of this compound page.
2345d5d4b0aaSChen, Kenneth W 			 */
2346d5d4b0aaSChen, Kenneth W 			goto same_page;
2347d5d4b0aaSChen, Kenneth W 		}
234863551ae0SDavid Gibson 	}
23491c59827dSHugh Dickins 	spin_unlock(&mm->page_table_lock);
235063551ae0SDavid Gibson 	*length = remainder;
235163551ae0SDavid Gibson 	*position = vaddr;
235263551ae0SDavid Gibson
23532a15efc9SHugh Dickins 	return i ? i : -EFAULT;
235463551ae0SDavid Gibson }
23558f860591SZhang, Yanmin
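/*
 * Apply newprot to every huge PTE in [address, end).  PMDs shared with
 * other mappings are unshared rather than modified in place, and the TLB
 * is flushed for the whole range once the walk completes.
 */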
23568f860591SZhang, Yanmin void hugetlb_change_protection(struct vm_area_struct *vma,
23578f860591SZhang, Yanmin 		unsigned long address, unsigned long end, pgprot_t newprot)
23588f860591SZhang, Yanmin {
23598f860591SZhang, Yanmin 	struct mm_struct *mm = vma->vm_mm;
23608f860591SZhang, Yanmin 	unsigned long start = address;
23618f860591SZhang, Yanmin 	pte_t *ptep;
23628f860591SZhang, Yanmin 	pte_t pte;
2363a5516438SAndi Kleen 	struct hstate *h = hstate_vma(vma);
23648f860591SZhang, Yanmin
23658f860591SZhang, Yanmin 	BUG_ON(address >= end);
23668f860591SZhang, Yanmin 	flush_cache_range(vma, address, end);
23678f860591SZhang, Yanmin
236839dde65cSChen, Kenneth W 	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
23698f860591SZhang, Yanmin 	spin_lock(&mm->page_table_lock);
2370a5516438SAndi Kleen 	for (; address < end; address += huge_page_size(h)) {
23718f860591SZhang, Yanmin 		ptep = huge_pte_offset(mm, address);
23728f860591SZhang, Yanmin 		if (!ptep)
23738f860591SZhang, Yanmin 			continue;
237439dde65cSChen, Kenneth W 		if (huge_pmd_unshare(mm, &address, ptep))
237539dde65cSChen, Kenneth W 			continue;
23767f2e9525SGerald Schaefer 		if (!huge_pte_none(huge_ptep_get(ptep))) {
23778f860591SZhang, Yanmin 			pte = huge_ptep_get_and_clear(mm, address, ptep);
23788f860591SZhang, Yanmin 			pte = pte_mkhuge(pte_modify(pte, newprot));
23798f860591SZhang, Yanmin 			set_huge_pte_at(mm, address, ptep, pte);
23808f860591SZhang, Yanmin 		}
23818f860591SZhang, Yanmin 	}
23828f860591SZhang, Yanmin 	spin_unlock(&mm->page_table_lock);
238339dde65cSChen, Kenneth W 	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
23848f860591SZhang, Yanmin
23858f860591SZhang, Yanmin 	flush_tlb_range(vma, start, end);
23868f860591SZhang, Yanmin }
23878f860591SZhang, Yanmin
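/*
 * Reserve huge pages for a mapping of [from, to) at mmap() time: work out
 * how many new huge pages are needed (region_chg() for shared mappings,
 * the full range for private ones), charge the filesystem quota, check
 * that enough free huge pages exist via hugetlb_acct_memory(), and only
 * then record the reserved region for shared mappings.
 */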
2388a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode,
2389a1e78772SMel Gorman 					long from, long to,
23905a6fe125SMel Gorman 					struct vm_area_struct *vma,
23915a6fe125SMel Gorman 					int acctflag)
2392e4e574b7SAdam Litke {
239317c9d12eSMel Gorman 	long ret, chg;
2394a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
2395e4e574b7SAdam Litke
2396a1e78772SMel Gorman 	/*
239717c9d12eSMel Gorman 	 * Only apply hugepage reservation if asked. At fault time, an
239817c9d12eSMel Gorman 	 * attempt will be made for VM_NORESERVE to allocate a page
239917c9d12eSMel Gorman 	 * and filesystem quota without using reserves
240017c9d12eSMel Gorman 	 */
240117c9d12eSMel Gorman 	if (acctflag & VM_NORESERVE)
240217c9d12eSMel Gorman 		return 0;
240317c9d12eSMel Gorman
240417c9d12eSMel Gorman 	/*
2405a1e78772SMel Gorman 	 * Shared mappings base their reservation on the number of pages that
2406a1e78772SMel Gorman 	 * are already allocated on behalf of the file. Private mappings need
2407a1e78772SMel Gorman 	 * to reserve the full area even if read-only as mprotect() may be
2408a1e78772SMel Gorman 	 * called to make the mapping read-write. Assume !vma is a shm mapping
2409a1e78772SMel Gorman 	 */
2410f83a275dSMel Gorman 	if (!vma || vma->vm_flags & VM_MAYSHARE)
2411e4e574b7SAdam Litke 		chg = region_chg(&inode->i_mapping->private_list, from, to);
24125a6fe125SMel Gorman 	else {
24135a6fe125SMel Gorman 		struct resv_map *resv_map = resv_map_alloc();
24145a6fe125SMel Gorman 		if (!resv_map)
24155a6fe125SMel Gorman 			return -ENOMEM;
24165a6fe125SMel Gorman
241717c9d12eSMel Gorman 		chg = to - from;
241817c9d12eSMel Gorman
24195a6fe125SMel Gorman 		set_vma_resv_map(vma, resv_map);
24205a6fe125SMel Gorman 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
24215a6fe125SMel Gorman 	}
24225a6fe125SMel Gorman
242317c9d12eSMel Gorman 	if (chg < 0)
242417c9d12eSMel Gorman 		return chg;
242517c9d12eSMel Gorman
242617c9d12eSMel Gorman 	/* There must be enough filesystem quota for the mapping */
242717c9d12eSMel Gorman 	if (hugetlb_get_quota(inode->i_mapping, chg))
242817c9d12eSMel Gorman 		return -ENOSPC;
242917c9d12eSMel Gorman
243017c9d12eSMel Gorman 	/*
243117c9d12eSMel Gorman 	 * Check enough hugepages are available for the reservation.
243217c9d12eSMel Gorman 	 * Hand back the quota if there are not
243317c9d12eSMel Gorman 	 */
243417c9d12eSMel Gorman 	ret = hugetlb_acct_memory(h, chg);
243517c9d12eSMel Gorman 	if (ret < 0) {
243617c9d12eSMel Gorman 		hugetlb_put_quota(inode->i_mapping, chg);
243717c9d12eSMel Gorman 		return ret;
243817c9d12eSMel Gorman 	}
243917c9d12eSMel Gorman
244017c9d12eSMel Gorman 	/*
244117c9d12eSMel Gorman 	 * Account for the reservations made. Shared mappings record regions
244217c9d12eSMel Gorman 	 * that have reservations as they are shared by multiple VMAs.
244317c9d12eSMel Gorman 	 * When the last VMA disappears, the region map says how much
244417c9d12eSMel Gorman 	 * the reservation was and the page cache tells how much of
244517c9d12eSMel Gorman 	 * the reservation was consumed. Private mappings are per-VMA and
244617c9d12eSMel Gorman 	 * only the consumed reservations are tracked. When the VMA
244717c9d12eSMel Gorman 	 * disappears, the original reservation is the VMA size and the
244817c9d12eSMel Gorman 	 * consumed reservations are stored in the map. Hence, nothing
244917c9d12eSMel Gorman 	 * else has to be done for private mappings here
245017c9d12eSMel Gorman 	 */
2451f83a275dSMel Gorman 	if (!vma || vma->vm_flags & VM_MAYSHARE)
245217c9d12eSMel Gorman 		region_add(&inode->i_mapping->private_list, from, to);
2453a43a8c39SChen, Kenneth W 	return 0;
2454a43a8c39SChen, Kenneth W }
2455a43a8c39SChen, Kenneth W
2456a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2457a43a8c39SChen, Kenneth W {
2458a5516438SAndi Kleen 	struct hstate *h = hstate_inode(inode);
2459a43a8c39SChen, Kenneth W 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
246045c682a6SKen Chen
246145c682a6SKen Chen 	spin_lock(&inode->i_lock);
2462e4c6f8beSEric Sandeen 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
246345c682a6SKen Chen 	spin_unlock(&inode->i_lock);
246445c682a6SKen Chen
246590d8b7e6SAdam Litke 	hugetlb_put_quota(inode->i_mapping, (chg - freed));
2466a5516438SAndi Kleen 	hugetlb_acct_memory(h, -(chg - freed));
2467a43a8c39SChen, Kenneth W }
2468
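/*
 * A minimal userspace sketch (not part of this file) of how the reservation
 * paths above are exercised: mapping a hugetlbfs file normally reserves huge
 * pages at mmap() time, while MAP_NORESERVE (VM_NORESERVE) skips the up-front
 * reservation and may SIGBUS at fault time if no huge page is available.
 * The /dev/hugepages mount point and the 2MB huge page size are assumptions.
 */
#include <fcntl.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;
	int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return EXIT_FAILURE;

	/* Triggers the reservation path: huge pages are set aside up front. */
	void *reserved = mmap(NULL, len, PROT_READ | PROT_WRITE,
			      MAP_SHARED, fd, 0);

	/* VM_NORESERVE: no up-front reservation, allocation happens at fault. */
	void *lazy = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_NORESERVE, fd, 0);

	if (reserved != MAP_FAILED)
		munmap(reserved, len);
	if (lazy != MAP_FAILED)
		munmap(lazy, len);
	close(fd);
	unlink("/dev/hugepages/example");
	return EXIT_SUCCESS;
}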