/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
 * must either hold the mmap_sem for write, or the mmap_sem for read and
 * the hugetlb_instantiation mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		int seg_from;
		int seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << (hstate->order + PAGE_SHIFT);
}

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_SHARED) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void clear_gigantic_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}
static void clear_huge_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;

	if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, sz);
		return;
	}

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_gigantic_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);
	struct page *dst_base = dst;
	struct page *src_base = src;
	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}
static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);

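	/*
	 * Pages of an order above MAX_ORDER are not guaranteed to be
	 * contiguous in mem_map, so gigantic pages are stepped through
	 * with mem_map_next() in copy_gigantic_page() rather than by
	 * plain pointer arithmetic.
	 */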
	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src, addr, vma);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct hstate *h)
{
	int nid;
	struct page *page = NULL;

	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
		if (!list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	struct zone *zone;
	struct zoneref *z;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;

			if (!avoid_reserve)
				decrement_hugepage_resv_vma(h, vma);

			break;
		}
	}
	mpol_cond_put(mpol);
	return page;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !h->surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && h->surplus_huge_pages_node[nid] >=
						h->nr_huge_pages_node[nid])
			continue;

		h->surplus_huge_pages += delta;
		h->surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

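	/*
	 * Allocate a fresh compound page on the requested node only
	 * (__GFP_THISNODE); allocation failures are expected and handled
	 * by the caller, so __GFP_NOWARN keeps them quiet.
	 */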
	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * Use a helper variable to find the next node and then
 * copy it back to hugetlb_next_nid afterwards:
 * otherwise there's a window in which a racer might
 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
 * But we don't need to use a spin_lock here: it really
 * doesn't matter if occasionally a racer chooses the
 * same nid as we do.  Move nid forward in the mask even
 * if we just successfully allocated a hugepage so that
 * the next caller gets hugepages on the next node.
 */
static int hstate_next_node(struct hstate *h)
{
	int next_nid;
	next_nid = next_node(h->hugetlb_next_nid, node_online_map);
	if (next_nid == MAX_NUMNODES)
		next_nid = first_node(node_online_map);
	h->hugetlb_next_nid = next_nid;
	return next_nid;
}

static int alloc_fresh_huge_page(struct hstate *h)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = h->hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
		if (page)
			ret = 1;
		next_nid = hstate_next_node(h);
	} while (!page && h->hugetlb_next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *page;
	unsigned int nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus().  A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
					__GFP_REPEAT|__GFP_NOWARN,
					huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		return NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[nid]++;
		h->surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 * Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(h, page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes. Iterate across all nodes until we
	 * can no longer free unreserved surplus pages. This occurs when
	 * the nodes with surplus pages have no free pages.
	 */
	unsigned long remaining_iterations = num_online_nodes();

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	while (remaining_iterations-- && nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!h->surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;
			h->surplus_huge_pages--;
			h->surplus_huge_pages_node[nid]--;
			nr_pages--;
			remaining_iterations = num_online_nodes();
		}
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * reservation and actually increase quota before an allocation can occur.
 * Where any new reservation would be required the reservation change is
 * prepared, but not committed.  Once the page has been quota'd, allocated
 * and instantiated the change should be committed via vma_commit_reservation.
 * No action is required on failure.
 */
static int vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		int err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}
static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	long chg;

	/*
	 * Processes that did not create the mapping will have no reserves and
	 * will not have accounted against quota. Check that the quota can be
	 * made before satisfying the allocation.
	 * MAP_NORESERVE mappings may also need pages and quota allocated
	 * if no reserve mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(chg);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-ENOSPC);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, vma, addr);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}

	set_page_refcounted(page);
	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(h, vma, addr);

	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_online_map);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(h->hugetlb_next_nid),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
		hstate_next_node(h);
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
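	/* Log each registered huge page size and its pre-allocated pool. */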
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		printk(KERN_INFO "HugeTLB registered %s page size, "
				 "pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count)
{
}
#endif

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
11617893d1d5SAdam Litke */ 11621da177e4SLinus Torvalds spin_lock(&hugetlb_lock); 1163a5516438SAndi Kleen while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 1164a5516438SAndi Kleen if (!adjust_pool_surplus(h, -1)) 11657893d1d5SAdam Litke break; 11667893d1d5SAdam Litke } 11677893d1d5SAdam Litke 1168a5516438SAndi Kleen while (count > persistent_huge_pages(h)) { 11697893d1d5SAdam Litke /* 11707893d1d5SAdam Litke * If this allocation races such that we no longer need the 11717893d1d5SAdam Litke * page, free_huge_page will handle it by freeing the page 11727893d1d5SAdam Litke * and reducing the surplus. 11737893d1d5SAdam Litke */ 11747893d1d5SAdam Litke spin_unlock(&hugetlb_lock); 1175a5516438SAndi Kleen ret = alloc_fresh_huge_page(h); 11767893d1d5SAdam Litke spin_lock(&hugetlb_lock); 11777893d1d5SAdam Litke if (!ret) 11787893d1d5SAdam Litke goto out; 11797893d1d5SAdam Litke 11807893d1d5SAdam Litke } 11817893d1d5SAdam Litke 11827893d1d5SAdam Litke /* 11837893d1d5SAdam Litke * Decrease the pool size 11847893d1d5SAdam Litke * First return free pages to the buddy allocator (being careful 11857893d1d5SAdam Litke * to keep enough around to satisfy reservations). Then place 11867893d1d5SAdam Litke * pages into surplus state as needed so the pool will shrink 11877893d1d5SAdam Litke * to the desired size as pages become free. 1188d1c3fb1fSNishanth Aravamudan * 1189d1c3fb1fSNishanth Aravamudan * By placing pages into the surplus state independent of the 1190d1c3fb1fSNishanth Aravamudan * overcommit value, we are allowing the surplus pool size to 1191d1c3fb1fSNishanth Aravamudan * exceed overcommit. There are few sane options here. Since 1192d1c3fb1fSNishanth Aravamudan * alloc_buddy_huge_page() is checking the global counter, 1193d1c3fb1fSNishanth Aravamudan * though, we'll note that we're not allowed to exceed surplus 1194d1c3fb1fSNishanth Aravamudan * and won't grow the pool anywhere else. Not until one of the 1195d1c3fb1fSNishanth Aravamudan * sysctls are changed, or the surplus pages go out of use. 
11967893d1d5SAdam Litke */ 1197a5516438SAndi Kleen min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 11986b0c880dSAdam Litke min_count = max(count, min_count); 1199a5516438SAndi Kleen try_to_free_low(h, min_count); 1200a5516438SAndi Kleen while (min_count < persistent_huge_pages(h)) { 1201a5516438SAndi Kleen struct page *page = dequeue_huge_page(h); 12021da177e4SLinus Torvalds if (!page) 12031da177e4SLinus Torvalds break; 1204a5516438SAndi Kleen update_and_free_page(h, page); 12051da177e4SLinus Torvalds } 1206a5516438SAndi Kleen while (count < persistent_huge_pages(h)) { 1207a5516438SAndi Kleen if (!adjust_pool_surplus(h, 1)) 12087893d1d5SAdam Litke break; 12097893d1d5SAdam Litke } 12107893d1d5SAdam Litke out: 1211a5516438SAndi Kleen ret = persistent_huge_pages(h); 12121da177e4SLinus Torvalds spin_unlock(&hugetlb_lock); 12137893d1d5SAdam Litke return ret; 12141da177e4SLinus Torvalds } 12151da177e4SLinus Torvalds 1216a3437870SNishanth Aravamudan #define HSTATE_ATTR_RO(_name) \ 1217a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 1218a3437870SNishanth Aravamudan 1219a3437870SNishanth Aravamudan #define HSTATE_ATTR(_name) \ 1220a3437870SNishanth Aravamudan static struct kobj_attribute _name##_attr = \ 1221a3437870SNishanth Aravamudan __ATTR(_name, 0644, _name##_show, _name##_store) 1222a3437870SNishanth Aravamudan 1223a3437870SNishanth Aravamudan static struct kobject *hugepages_kobj; 1224a3437870SNishanth Aravamudan static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 1225a3437870SNishanth Aravamudan 1226a3437870SNishanth Aravamudan static struct hstate *kobj_to_hstate(struct kobject *kobj) 1227a3437870SNishanth Aravamudan { 1228a3437870SNishanth Aravamudan int i; 1229a3437870SNishanth Aravamudan for (i = 0; i < HUGE_MAX_HSTATE; i++) 1230a3437870SNishanth Aravamudan if (hstate_kobjs[i] == kobj) 1231a3437870SNishanth Aravamudan return &hstates[i]; 1232a3437870SNishanth Aravamudan BUG(); 1233a3437870SNishanth Aravamudan return NULL; 1234a3437870SNishanth Aravamudan } 1235a3437870SNishanth Aravamudan 1236a3437870SNishanth Aravamudan static ssize_t nr_hugepages_show(struct kobject *kobj, 1237a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1238a3437870SNishanth Aravamudan { 1239a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1240a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_huge_pages); 1241a3437870SNishanth Aravamudan } 1242a3437870SNishanth Aravamudan static ssize_t nr_hugepages_store(struct kobject *kobj, 1243a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1244a3437870SNishanth Aravamudan { 1245a3437870SNishanth Aravamudan int err; 1246a3437870SNishanth Aravamudan unsigned long input; 1247a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1248a3437870SNishanth Aravamudan 1249a3437870SNishanth Aravamudan err = strict_strtoul(buf, 10, &input); 1250a3437870SNishanth Aravamudan if (err) 1251a3437870SNishanth Aravamudan return 0; 1252a3437870SNishanth Aravamudan 1253a3437870SNishanth Aravamudan h->max_huge_pages = set_max_huge_pages(h, input); 1254a3437870SNishanth Aravamudan 1255a3437870SNishanth Aravamudan return count; 1256a3437870SNishanth Aravamudan } 1257a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages); 1258a3437870SNishanth Aravamudan 1259a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 1260a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 
1261a3437870SNishanth Aravamudan { 1262a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1263a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 1264a3437870SNishanth Aravamudan } 1265a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 1266a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1267a3437870SNishanth Aravamudan { 1268a3437870SNishanth Aravamudan int err; 1269a3437870SNishanth Aravamudan unsigned long input; 1270a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1271a3437870SNishanth Aravamudan 1272a3437870SNishanth Aravamudan err = strict_strtoul(buf, 10, &input); 1273a3437870SNishanth Aravamudan if (err) 1274a3437870SNishanth Aravamudan return 0; 1275a3437870SNishanth Aravamudan 1276a3437870SNishanth Aravamudan spin_lock(&hugetlb_lock); 1277a3437870SNishanth Aravamudan h->nr_overcommit_huge_pages = input; 1278a3437870SNishanth Aravamudan spin_unlock(&hugetlb_lock); 1279a3437870SNishanth Aravamudan 1280a3437870SNishanth Aravamudan return count; 1281a3437870SNishanth Aravamudan } 1282a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages); 1283a3437870SNishanth Aravamudan 1284a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj, 1285a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1286a3437870SNishanth Aravamudan { 1287a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1288a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->free_huge_pages); 1289a3437870SNishanth Aravamudan } 1290a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages); 1291a3437870SNishanth Aravamudan 1292a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj, 1293a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1294a3437870SNishanth Aravamudan { 1295a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1296a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->resv_huge_pages); 1297a3437870SNishanth Aravamudan } 1298a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages); 1299a3437870SNishanth Aravamudan 1300a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj, 1301a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1302a3437870SNishanth Aravamudan { 1303a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1304a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->surplus_huge_pages); 1305a3437870SNishanth Aravamudan } 1306a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages); 1307a3437870SNishanth Aravamudan 1308a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = { 1309a3437870SNishanth Aravamudan &nr_hugepages_attr.attr, 1310a3437870SNishanth Aravamudan &nr_overcommit_hugepages_attr.attr, 1311a3437870SNishanth Aravamudan &free_hugepages_attr.attr, 1312a3437870SNishanth Aravamudan &resv_hugepages_attr.attr, 1313a3437870SNishanth Aravamudan &surplus_hugepages_attr.attr, 1314a3437870SNishanth Aravamudan NULL, 1315a3437870SNishanth Aravamudan }; 1316a3437870SNishanth Aravamudan 1317a3437870SNishanth Aravamudan static struct attribute_group hstate_attr_group = { 1318a3437870SNishanth Aravamudan .attrs = hstate_attrs, 1319a3437870SNishanth Aravamudan }; 1320a3437870SNishanth Aravamudan 1321a3437870SNishanth Aravamudan static int __init hugetlb_sysfs_add_hstate(struct hstate *h) 
1322a3437870SNishanth Aravamudan { 1323a3437870SNishanth Aravamudan int retval; 1324a3437870SNishanth Aravamudan 1325a3437870SNishanth Aravamudan hstate_kobjs[h - hstates] = kobject_create_and_add(h->name, 1326a3437870SNishanth Aravamudan hugepages_kobj); 1327a3437870SNishanth Aravamudan if (!hstate_kobjs[h - hstates]) 1328a3437870SNishanth Aravamudan return -ENOMEM; 1329a3437870SNishanth Aravamudan 1330a3437870SNishanth Aravamudan retval = sysfs_create_group(hstate_kobjs[h - hstates], 1331a3437870SNishanth Aravamudan &hstate_attr_group); 1332a3437870SNishanth Aravamudan if (retval) 1333a3437870SNishanth Aravamudan kobject_put(hstate_kobjs[h - hstates]); 1334a3437870SNishanth Aravamudan 1335a3437870SNishanth Aravamudan return retval; 1336a3437870SNishanth Aravamudan } 1337a3437870SNishanth Aravamudan 1338a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void) 1339a3437870SNishanth Aravamudan { 1340a3437870SNishanth Aravamudan struct hstate *h; 1341a3437870SNishanth Aravamudan int err; 1342a3437870SNishanth Aravamudan 1343a3437870SNishanth Aravamudan hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 1344a3437870SNishanth Aravamudan if (!hugepages_kobj) 1345a3437870SNishanth Aravamudan return; 1346a3437870SNishanth Aravamudan 1347a3437870SNishanth Aravamudan for_each_hstate(h) { 1348a3437870SNishanth Aravamudan err = hugetlb_sysfs_add_hstate(h); 1349a3437870SNishanth Aravamudan if (err) 1350a3437870SNishanth Aravamudan printk(KERN_ERR "Hugetlb: Unable to add hstate %s", 1351a3437870SNishanth Aravamudan h->name); 1352a3437870SNishanth Aravamudan } 1353a3437870SNishanth Aravamudan } 1354a3437870SNishanth Aravamudan 1355a3437870SNishanth Aravamudan static void __exit hugetlb_exit(void) 1356a3437870SNishanth Aravamudan { 1357a3437870SNishanth Aravamudan struct hstate *h; 1358a3437870SNishanth Aravamudan 1359a3437870SNishanth Aravamudan for_each_hstate(h) { 1360a3437870SNishanth Aravamudan kobject_put(hstate_kobjs[h - hstates]); 1361a3437870SNishanth Aravamudan } 1362a3437870SNishanth Aravamudan 1363a3437870SNishanth Aravamudan kobject_put(hugepages_kobj); 1364a3437870SNishanth Aravamudan } 1365a3437870SNishanth Aravamudan module_exit(hugetlb_exit); 1366a3437870SNishanth Aravamudan 1367a3437870SNishanth Aravamudan static int __init hugetlb_init(void) 1368a3437870SNishanth Aravamudan { 13690ef89d25SBenjamin Herrenschmidt /* Some platform decide whether they support huge pages at boot 13700ef89d25SBenjamin Herrenschmidt * time. 
On these, such as powerpc, HPAGE_SHIFT is set to 0 when 13710ef89d25SBenjamin Herrenschmidt * there is no such support 13720ef89d25SBenjamin Herrenschmidt */ 13730ef89d25SBenjamin Herrenschmidt if (HPAGE_SHIFT == 0) 13740ef89d25SBenjamin Herrenschmidt return 0; 1375a3437870SNishanth Aravamudan 1376e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) { 1377e11bfbfcSNick Piggin default_hstate_size = HPAGE_SIZE; 1378e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) 1379a3437870SNishanth Aravamudan hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 1380a3437870SNishanth Aravamudan } 1381e11bfbfcSNick Piggin default_hstate_idx = size_to_hstate(default_hstate_size) - hstates; 1382e11bfbfcSNick Piggin if (default_hstate_max_huge_pages) 1383e11bfbfcSNick Piggin default_hstate.max_huge_pages = default_hstate_max_huge_pages; 1384a3437870SNishanth Aravamudan 1385a3437870SNishanth Aravamudan hugetlb_init_hstates(); 1386a3437870SNishanth Aravamudan 1387aa888a74SAndi Kleen gather_bootmem_prealloc(); 1388aa888a74SAndi Kleen 1389a3437870SNishanth Aravamudan report_hugepages(); 1390a3437870SNishanth Aravamudan 1391a3437870SNishanth Aravamudan hugetlb_sysfs_init(); 1392a3437870SNishanth Aravamudan 1393a3437870SNishanth Aravamudan return 0; 1394a3437870SNishanth Aravamudan } 1395a3437870SNishanth Aravamudan module_init(hugetlb_init); 1396a3437870SNishanth Aravamudan 1397a3437870SNishanth Aravamudan /* Should be called on processing a hugepagesz=... option */ 1398a3437870SNishanth Aravamudan void __init hugetlb_add_hstate(unsigned order) 1399a3437870SNishanth Aravamudan { 1400a3437870SNishanth Aravamudan struct hstate *h; 14018faa8b07SAndi Kleen unsigned long i; 14028faa8b07SAndi Kleen 1403a3437870SNishanth Aravamudan if (size_to_hstate(PAGE_SIZE << order)) { 1404a3437870SNishanth Aravamudan printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n"); 1405a3437870SNishanth Aravamudan return; 1406a3437870SNishanth Aravamudan } 1407a3437870SNishanth Aravamudan BUG_ON(max_hstate >= HUGE_MAX_HSTATE); 1408a3437870SNishanth Aravamudan BUG_ON(order == 0); 1409a3437870SNishanth Aravamudan h = &hstates[max_hstate++]; 1410a3437870SNishanth Aravamudan h->order = order; 1411a3437870SNishanth Aravamudan h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 14128faa8b07SAndi Kleen h->nr_huge_pages = 0; 14138faa8b07SAndi Kleen h->free_huge_pages = 0; 14148faa8b07SAndi Kleen for (i = 0; i < MAX_NUMNODES; ++i) 14158faa8b07SAndi Kleen INIT_LIST_HEAD(&h->hugepage_freelists[i]); 14168faa8b07SAndi Kleen h->hugetlb_next_nid = first_node(node_online_map); 1417a3437870SNishanth Aravamudan snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 1418a3437870SNishanth Aravamudan huge_page_size(h)/1024); 14198faa8b07SAndi Kleen 1420a3437870SNishanth Aravamudan parsed_hstate = h; 1421a3437870SNishanth Aravamudan } 1422a3437870SNishanth Aravamudan 1423e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s) 1424a3437870SNishanth Aravamudan { 1425a3437870SNishanth Aravamudan unsigned long *mhp; 14268faa8b07SAndi Kleen static unsigned long *last_mhp; 1427a3437870SNishanth Aravamudan 1428a3437870SNishanth Aravamudan /* 1429a3437870SNishanth Aravamudan * !max_hstate means we haven't parsed a hugepagesz= parameter yet, 1430a3437870SNishanth Aravamudan * so this hugepages= parameter goes to the "default hstate". 
1431a3437870SNishanth Aravamudan */ 1432a3437870SNishanth Aravamudan if (!max_hstate) 1433a3437870SNishanth Aravamudan mhp = &default_hstate_max_huge_pages; 1434a3437870SNishanth Aravamudan else 1435a3437870SNishanth Aravamudan mhp = &parsed_hstate->max_huge_pages; 1436a3437870SNishanth Aravamudan 14378faa8b07SAndi Kleen if (mhp == last_mhp) { 14388faa8b07SAndi Kleen printk(KERN_WARNING "hugepages= specified twice without " 14398faa8b07SAndi Kleen "interleaving hugepagesz=, ignoring\n"); 14408faa8b07SAndi Kleen return 1; 14418faa8b07SAndi Kleen } 14428faa8b07SAndi Kleen 1443a3437870SNishanth Aravamudan if (sscanf(s, "%lu", mhp) <= 0) 1444a3437870SNishanth Aravamudan *mhp = 0; 1445a3437870SNishanth Aravamudan 14468faa8b07SAndi Kleen /* 14478faa8b07SAndi Kleen * Global state is always initialized later in hugetlb_init. 14488faa8b07SAndi Kleen * But we need to allocate >= MAX_ORDER hstates here early to still 14498faa8b07SAndi Kleen * use the bootmem allocator. 14508faa8b07SAndi Kleen */ 14518faa8b07SAndi Kleen if (max_hstate && parsed_hstate->order >= MAX_ORDER) 14528faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(parsed_hstate); 14538faa8b07SAndi Kleen 14548faa8b07SAndi Kleen last_mhp = mhp; 14558faa8b07SAndi Kleen 1456a3437870SNishanth Aravamudan return 1; 1457a3437870SNishanth Aravamudan } 1458e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup); 1459e11bfbfcSNick Piggin 1460e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s) 1461e11bfbfcSNick Piggin { 1462e11bfbfcSNick Piggin default_hstate_size = memparse(s, &s); 1463e11bfbfcSNick Piggin return 1; 1464e11bfbfcSNick Piggin } 1465e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup); 1466a3437870SNishanth Aravamudan 14678a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array) 14688a213460SNishanth Aravamudan { 14698a213460SNishanth Aravamudan int node; 14708a213460SNishanth Aravamudan unsigned int nr = 0; 14718a213460SNishanth Aravamudan 14728a213460SNishanth Aravamudan for_each_node_mask(node, cpuset_current_mems_allowed) 14738a213460SNishanth Aravamudan nr += array[node]; 14748a213460SNishanth Aravamudan 14758a213460SNishanth Aravamudan return nr; 14768a213460SNishanth Aravamudan } 14778a213460SNishanth Aravamudan 14788a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL 14791da177e4SLinus Torvalds int hugetlb_sysctl_handler(struct ctl_table *table, int write, 14801da177e4SLinus Torvalds struct file *file, void __user *buffer, 14811da177e4SLinus Torvalds size_t *length, loff_t *ppos) 14821da177e4SLinus Torvalds { 1483e5ff2159SAndi Kleen struct hstate *h = &default_hstate; 1484e5ff2159SAndi Kleen unsigned long tmp; 1485e5ff2159SAndi Kleen 1486e5ff2159SAndi Kleen if (!write) 1487e5ff2159SAndi Kleen tmp = h->max_huge_pages; 1488e5ff2159SAndi Kleen 1489e5ff2159SAndi Kleen table->data = &tmp; 1490e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 14911da177e4SLinus Torvalds proc_doulongvec_minmax(table, write, file, buffer, length, ppos); 1492e5ff2159SAndi Kleen 1493e5ff2159SAndi Kleen if (write) 1494e5ff2159SAndi Kleen h->max_huge_pages = set_max_huge_pages(h, tmp); 1495e5ff2159SAndi Kleen 14961da177e4SLinus Torvalds return 0; 14971da177e4SLinus Torvalds } 1498396faf03SMel Gorman 1499396faf03SMel Gorman int hugetlb_treat_movable_handler(struct ctl_table *table, int write, 1500396faf03SMel Gorman struct file *file, void __user *buffer, 1501396faf03SMel Gorman size_t *length, loff_t *ppos) 1502396faf03SMel Gorman { 1503396faf03SMel Gorman proc_dointvec(table, 
write, file, buffer, length, ppos); 1504396faf03SMel Gorman if (hugepages_treat_as_movable) 1505396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER_MOVABLE; 1506396faf03SMel Gorman else 1507396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER; 1508396faf03SMel Gorman return 0; 1509396faf03SMel Gorman } 1510396faf03SMel Gorman 1511a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write, 1512a3d0c6aaSNishanth Aravamudan struct file *file, void __user *buffer, 1513a3d0c6aaSNishanth Aravamudan size_t *length, loff_t *ppos) 1514a3d0c6aaSNishanth Aravamudan { 1515a5516438SAndi Kleen struct hstate *h = &default_hstate; 1516e5ff2159SAndi Kleen unsigned long tmp; 1517e5ff2159SAndi Kleen 1518e5ff2159SAndi Kleen if (!write) 1519e5ff2159SAndi Kleen tmp = h->nr_overcommit_huge_pages; 1520e5ff2159SAndi Kleen 1521e5ff2159SAndi Kleen table->data = &tmp; 1522e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 1523a3d0c6aaSNishanth Aravamudan proc_doulongvec_minmax(table, write, file, buffer, length, ppos); 1524e5ff2159SAndi Kleen 1525e5ff2159SAndi Kleen if (write) { 1526064d9efeSNishanth Aravamudan spin_lock(&hugetlb_lock); 1527e5ff2159SAndi Kleen h->nr_overcommit_huge_pages = tmp; 1528a3d0c6aaSNishanth Aravamudan spin_unlock(&hugetlb_lock); 1529e5ff2159SAndi Kleen } 1530e5ff2159SAndi Kleen 1531a3d0c6aaSNishanth Aravamudan return 0; 1532a3d0c6aaSNishanth Aravamudan } 1533a3d0c6aaSNishanth Aravamudan 15341da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 15351da177e4SLinus Torvalds 1536e1759c21SAlexey Dobriyan void hugetlb_report_meminfo(struct seq_file *m) 15371da177e4SLinus Torvalds { 1538a5516438SAndi Kleen struct hstate *h = &default_hstate; 1539e1759c21SAlexey Dobriyan seq_printf(m, 15401da177e4SLinus Torvalds "HugePages_Total: %5lu\n" 15411da177e4SLinus Torvalds "HugePages_Free: %5lu\n" 1542b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n" 15437893d1d5SAdam Litke "HugePages_Surp: %5lu\n" 15444f98a2feSRik van Riel "Hugepagesize: %8lu kB\n", 1545a5516438SAndi Kleen h->nr_huge_pages, 1546a5516438SAndi Kleen h->free_huge_pages, 1547a5516438SAndi Kleen h->resv_huge_pages, 1548a5516438SAndi Kleen h->surplus_huge_pages, 1549a5516438SAndi Kleen 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 15501da177e4SLinus Torvalds } 15511da177e4SLinus Torvalds 15521da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf) 15531da177e4SLinus Torvalds { 1554a5516438SAndi Kleen struct hstate *h = &default_hstate; 15551da177e4SLinus Torvalds return sprintf(buf, 15561da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n" 1557a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n" 1558a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n", 1559a5516438SAndi Kleen nid, h->nr_huge_pages_node[nid], 1560a5516438SAndi Kleen nid, h->free_huge_pages_node[nid], 1561a5516438SAndi Kleen nid, h->surplus_huge_pages_node[nid]); 15621da177e4SLinus Torvalds } 15631da177e4SLinus Torvalds 15641da177e4SLinus Torvalds /* Return the number pages of memory we physically have, in PAGE_SIZE units. 
*/ 15651da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void) 15661da177e4SLinus Torvalds { 1567a5516438SAndi Kleen struct hstate *h = &default_hstate; 1568a5516438SAndi Kleen return h->nr_huge_pages * pages_per_huge_page(h); 15691da177e4SLinus Torvalds } 15701da177e4SLinus Torvalds 1571a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta) 1572fc1b8a73SMel Gorman { 1573fc1b8a73SMel Gorman int ret = -ENOMEM; 1574fc1b8a73SMel Gorman 1575fc1b8a73SMel Gorman spin_lock(&hugetlb_lock); 1576fc1b8a73SMel Gorman /* 1577fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page 1578fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such 1579fc1b8a73SMel Gorman * reservation is completely rubbish in the presence of cpuset because 1580fc1b8a73SMel Gorman * the reservation is not checked against page availability for the 1581fc1b8a73SMel Gorman * current cpuset. Applications can still potentially be OOM'ed by the kernel 1582fc1b8a73SMel Gorman * for lack of free htlb pages in the cpuset that the task is in. 1583fc1b8a73SMel Gorman * Attempting to enforce strict accounting with cpuset is almost 1584fc1b8a73SMel Gorman * impossible (or too ugly) because cpusets are so fluid that 1585fc1b8a73SMel Gorman * tasks or memory nodes can be dynamically moved between cpusets. 1586fc1b8a73SMel Gorman * 1587fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mapping with cpuset is 1588fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics, 1589fc1b8a73SMel Gorman * we fall back to checking against current free page availability as 1590fc1b8a73SMel Gorman * a best attempt, hopefully minimizing the impact of the semantics 1591fc1b8a73SMel Gorman * change that cpuset causes. 1592fc1b8a73SMel Gorman */ 1593fc1b8a73SMel Gorman if (delta > 0) { 1594a5516438SAndi Kleen if (gather_surplus_pages(h, delta) < 0) 1595fc1b8a73SMel Gorman goto out; 1596fc1b8a73SMel Gorman 1597a5516438SAndi Kleen if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { 1598a5516438SAndi Kleen return_unused_surplus_pages(h, delta); 1599fc1b8a73SMel Gorman goto out; 1600fc1b8a73SMel Gorman } 1601fc1b8a73SMel Gorman } 1602fc1b8a73SMel Gorman 1603fc1b8a73SMel Gorman ret = 0; 1604fc1b8a73SMel Gorman if (delta < 0) 1605a5516438SAndi Kleen return_unused_surplus_pages(h, (unsigned long) -delta); 1606fc1b8a73SMel Gorman 1607fc1b8a73SMel Gorman out: 1608fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock); 1609fc1b8a73SMel Gorman return ret; 1610fc1b8a73SMel Gorman } 1611fc1b8a73SMel Gorman 161284afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma) 161384afd99bSAndy Whitcroft { 161484afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 161584afd99bSAndy Whitcroft 161684afd99bSAndy Whitcroft /* 161784afd99bSAndy Whitcroft * This new VMA should share its sibling's reservation map if present. 161884afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where 161984afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA 162084afd99bSAndy Whitcroft * has a reference to the reservation map it cannot disappear until 162184afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a 162284afd99bSAndy Whitcroft * new reference here without additional locking.
162384afd99bSAndy Whitcroft */ 162484afd99bSAndy Whitcroft if (reservations) 162584afd99bSAndy Whitcroft kref_get(&reservations->refs); 162684afd99bSAndy Whitcroft } 162784afd99bSAndy Whitcroft 1628a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma) 1629a1e78772SMel Gorman { 1630a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 163184afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 163284afd99bSAndy Whitcroft unsigned long reserve; 163384afd99bSAndy Whitcroft unsigned long start; 163484afd99bSAndy Whitcroft unsigned long end; 163584afd99bSAndy Whitcroft 163684afd99bSAndy Whitcroft if (reservations) { 1637a5516438SAndi Kleen start = vma_hugecache_offset(h, vma, vma->vm_start); 1638a5516438SAndi Kleen end = vma_hugecache_offset(h, vma, vma->vm_end); 163984afd99bSAndy Whitcroft 164084afd99bSAndy Whitcroft reserve = (end - start) - 164184afd99bSAndy Whitcroft region_count(&reservations->regions, start, end); 164284afd99bSAndy Whitcroft 164384afd99bSAndy Whitcroft kref_put(&reservations->refs, resv_map_release); 164484afd99bSAndy Whitcroft 16457251ff78SAdam Litke if (reserve) { 1646a5516438SAndi Kleen hugetlb_acct_memory(h, -reserve); 16477251ff78SAdam Litke hugetlb_put_quota(vma->vm_file->f_mapping, reserve); 16487251ff78SAdam Litke } 1649a1e78772SMel Gorman } 165084afd99bSAndy Whitcroft } 1651a1e78772SMel Gorman 16521da177e4SLinus Torvalds /* 16531da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. They cause 16541da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the 16551da177e4SLinus Torvalds * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 16561da177e4SLinus Torvalds * this far. 16571da177e4SLinus Torvalds */ 1658d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 16591da177e4SLinus Torvalds { 16601da177e4SLinus Torvalds BUG(); 1661d0217ac0SNick Piggin return 0; 16621da177e4SLinus Torvalds } 16631da177e4SLinus Torvalds 16641da177e4SLinus Torvalds struct vm_operations_struct hugetlb_vm_ops = { 1665d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 166684afd99bSAndy Whitcroft .open = hugetlb_vm_op_open, 1667a1e78772SMel Gorman .close = hugetlb_vm_op_close, 16681da177e4SLinus Torvalds }; 16691da177e4SLinus Torvalds 16701e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 16711e8f889bSDavid Gibson int writable) 167263551ae0SDavid Gibson { 167363551ae0SDavid Gibson pte_t entry; 167463551ae0SDavid Gibson 16751e8f889bSDavid Gibson if (writable) { 167663551ae0SDavid Gibson entry = 167763551ae0SDavid Gibson pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); 167863551ae0SDavid Gibson } else { 16797f2e9525SGerald Schaefer entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot)); 168063551ae0SDavid Gibson } 168163551ae0SDavid Gibson entry = pte_mkyoung(entry); 168263551ae0SDavid Gibson entry = pte_mkhuge(entry); 168363551ae0SDavid Gibson 168463551ae0SDavid Gibson return entry; 168563551ae0SDavid Gibson } 168663551ae0SDavid Gibson 16871e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 16881e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 16891e8f889bSDavid Gibson { 16901e8f889bSDavid Gibson pte_t entry; 16911e8f889bSDavid Gibson 16927f2e9525SGerald Schaefer entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); 16937f2e9525SGerald Schaefer if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
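/* huge_ptep_set_access_flags() returned non-zero, so the PTE really was upgraded; let the architecture refresh any private MMU caches it keeps for this address. */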
16941e8f889bSDavid Gibson update_mmu_cache(vma, address, entry); 16951e8f889bSDavid Gibson } 16968dab5241SBenjamin Herrenschmidt } 16971e8f889bSDavid Gibson 16981e8f889bSDavid Gibson 169963551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 170063551ae0SDavid Gibson struct vm_area_struct *vma) 170163551ae0SDavid Gibson { 170263551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 170363551ae0SDavid Gibson struct page *ptepage; 17041c59827dSHugh Dickins unsigned long addr; 17051e8f889bSDavid Gibson int cow; 1706a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1707a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 17081e8f889bSDavid Gibson 17091e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 171063551ae0SDavid Gibson 1711a5516438SAndi Kleen for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 1712c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 1713c74df32cSHugh Dickins if (!src_pte) 1714c74df32cSHugh Dickins continue; 1715a5516438SAndi Kleen dst_pte = huge_pte_alloc(dst, addr, sz); 171663551ae0SDavid Gibson if (!dst_pte) 171763551ae0SDavid Gibson goto nomem; 1718c5c99429SLarry Woodman 1719c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 1720c5c99429SLarry Woodman if (dst_pte == src_pte) 1721c5c99429SLarry Woodman continue; 1722c5c99429SLarry Woodman 1723c74df32cSHugh Dickins spin_lock(&dst->page_table_lock); 172446478758SNick Piggin spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); 17257f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(src_pte))) { 17261e8f889bSDavid Gibson if (cow) 17277f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte); 17287f2e9525SGerald Schaefer entry = huge_ptep_get(src_pte); 172963551ae0SDavid Gibson ptepage = pte_page(entry); 173063551ae0SDavid Gibson get_page(ptepage); 173163551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 17321c59827dSHugh Dickins } 17331c59827dSHugh Dickins spin_unlock(&src->page_table_lock); 1734c74df32cSHugh Dickins spin_unlock(&dst->page_table_lock); 173563551ae0SDavid Gibson } 173663551ae0SDavid Gibson return 0; 173763551ae0SDavid Gibson 173863551ae0SDavid Gibson nomem: 173963551ae0SDavid Gibson return -ENOMEM; 174063551ae0SDavid Gibson } 174163551ae0SDavid Gibson 1742502717f4SChen, Kenneth W void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 174304f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 174463551ae0SDavid Gibson { 174563551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 174663551ae0SDavid Gibson unsigned long address; 1747c7546f8fSDavid Gibson pte_t *ptep; 174863551ae0SDavid Gibson pte_t pte; 174963551ae0SDavid Gibson struct page *page; 1750fe1668aeSChen, Kenneth W struct page *tmp; 1751a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1752a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 1753a5516438SAndi Kleen 1754c0a499c2SChen, Kenneth W /* 1755c0a499c2SChen, Kenneth W * A page gathering list, protected by per file i_mmap_lock. The 1756c0a499c2SChen, Kenneth W * lock is used to avoid list corruption from multiple unmapping 1757c0a499c2SChen, Kenneth W * of the same page since we are using page->lru. 
1758c0a499c2SChen, Kenneth W */ 1759fe1668aeSChen, Kenneth W LIST_HEAD(page_list); 176063551ae0SDavid Gibson 176163551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma)); 1762a5516438SAndi Kleen BUG_ON(start & ~huge_page_mask(h)); 1763a5516438SAndi Kleen BUG_ON(end & ~huge_page_mask(h)); 176463551ae0SDavid Gibson 1765cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_start(mm, start, end); 1766508034a3SHugh Dickins spin_lock(&mm->page_table_lock); 1767a5516438SAndi Kleen for (address = start; address < end; address += sz) { 1768c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address); 1769c7546f8fSDavid Gibson if (!ptep) 1770c7546f8fSDavid Gibson continue; 1771c7546f8fSDavid Gibson 177239dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 177339dde65cSChen, Kenneth W continue; 177439dde65cSChen, Kenneth W 177504f2cbe3SMel Gorman /* 177604f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific 177704f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we 177804f2cbe3SMel Gorman * are about to unmap is the actual page of interest. 177904f2cbe3SMel Gorman */ 178004f2cbe3SMel Gorman if (ref_page) { 178104f2cbe3SMel Gorman pte = huge_ptep_get(ptep); 178204f2cbe3SMel Gorman if (huge_pte_none(pte)) 178304f2cbe3SMel Gorman continue; 178404f2cbe3SMel Gorman page = pte_page(pte); 178504f2cbe3SMel Gorman if (page != ref_page) 178604f2cbe3SMel Gorman continue; 178704f2cbe3SMel Gorman 178804f2cbe3SMel Gorman /* 178904f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that 179004f2cbe3SMel Gorman * future faults in this VMA will fail rather than 179104f2cbe3SMel Gorman * looking like data was lost 179204f2cbe3SMel Gorman */ 179304f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 179404f2cbe3SMel Gorman } 179504f2cbe3SMel Gorman 1796c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep); 17977f2e9525SGerald Schaefer if (huge_pte_none(pte)) 179863551ae0SDavid Gibson continue; 1799c7546f8fSDavid Gibson 180063551ae0SDavid Gibson page = pte_page(pte); 18016649a386SKen Chen if (pte_dirty(pte)) 18026649a386SKen Chen set_page_dirty(page); 1803fe1668aeSChen, Kenneth W list_add(&page->lru, &page_list); 180463551ae0SDavid Gibson } 18051da177e4SLinus Torvalds spin_unlock(&mm->page_table_lock); 1806508034a3SHugh Dickins flush_tlb_range(vma, start, end); 1807cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_end(mm, start, end); 1808fe1668aeSChen, Kenneth W list_for_each_entry_safe(page, tmp, &page_list, lru) { 1809fe1668aeSChen, Kenneth W list_del(&page->lru); 1810fe1668aeSChen, Kenneth W put_page(page); 1811fe1668aeSChen, Kenneth W } 18121da177e4SLinus Torvalds } 181363551ae0SDavid Gibson 1814502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 181504f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 1816502717f4SChen, Kenneth W { 1817502717f4SChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 181804f2cbe3SMel Gorman __unmap_hugepage_range(vma, start, end, ref_page); 1819502717f4SChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 1820502717f4SChen, Kenneth W } 1821502717f4SChen, Kenneth W 182204f2cbe3SMel Gorman /* 182304f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE 182404f2cbe3SMel Gorman * mapping it owns the reserve page for.
The intention is to unmap the page 182504f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the 182604f2cbe3SMel Gorman * same region. 182704f2cbe3SMel Gorman */ 18282a4b3dedSHarvey Harrison static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 18292a4b3dedSHarvey Harrison struct page *page, unsigned long address) 183004f2cbe3SMel Gorman { 18317526674dSAdam Litke struct hstate *h = hstate_vma(vma); 183204f2cbe3SMel Gorman struct vm_area_struct *iter_vma; 183304f2cbe3SMel Gorman struct address_space *mapping; 183404f2cbe3SMel Gorman struct prio_tree_iter iter; 183504f2cbe3SMel Gorman pgoff_t pgoff; 183604f2cbe3SMel Gorman 183704f2cbe3SMel Gorman /* 183804f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation 183904f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units. 184004f2cbe3SMel Gorman */ 18417526674dSAdam Litke address = address & huge_page_mask(h); 184204f2cbe3SMel Gorman pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) 184304f2cbe3SMel Gorman + (vma->vm_pgoff >> PAGE_SHIFT); 184404f2cbe3SMel Gorman mapping = (struct address_space *)page_private(page); 184504f2cbe3SMel Gorman 184604f2cbe3SMel Gorman vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 184704f2cbe3SMel Gorman /* Do not unmap the current VMA */ 184804f2cbe3SMel Gorman if (iter_vma == vma) 184904f2cbe3SMel Gorman continue; 185004f2cbe3SMel Gorman 185104f2cbe3SMel Gorman /* 185204f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves. 185304f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these 185404f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA 185504f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing 185604f2cbe3SMel Gorman * from the time of fork. 
This would look like data corruption 185704f2cbe3SMel Gorman */ 185804f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 185904f2cbe3SMel Gorman unmap_hugepage_range(iter_vma, 18607526674dSAdam Litke address, address + huge_page_size(h), 186104f2cbe3SMel Gorman page); 186204f2cbe3SMel Gorman } 186304f2cbe3SMel Gorman 186404f2cbe3SMel Gorman return 1; 186504f2cbe3SMel Gorman } 186604f2cbe3SMel Gorman 18671e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 186804f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte, 186904f2cbe3SMel Gorman struct page *pagecache_page) 18701e8f889bSDavid Gibson { 1871a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 18721e8f889bSDavid Gibson struct page *old_page, *new_page; 187379ac6ba4SDavid Gibson int avoidcopy; 187404f2cbe3SMel Gorman int outside_reserve = 0; 18751e8f889bSDavid Gibson 18761e8f889bSDavid Gibson old_page = pte_page(pte); 18771e8f889bSDavid Gibson 187804f2cbe3SMel Gorman retry_avoidcopy: 18791e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy 18801e8f889bSDavid Gibson * and just make the page writable */ 18811e8f889bSDavid Gibson avoidcopy = (page_count(old_page) == 1); 18821e8f889bSDavid Gibson if (avoidcopy) { 18831e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep); 188483c54070SNick Piggin return 0; 18851e8f889bSDavid Gibson } 18861e8f889bSDavid Gibson 188704f2cbe3SMel Gorman /* 188804f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to 188904f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy 189004f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache 189104f2cbe3SMel Gorman * page is used to determine if the reserve at this address was 189204f2cbe3SMel Gorman * consumed or not. If reserves were used, a partial faulted mapping 189304f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead 189404f2cbe3SMel Gorman * of the full address range. 189504f2cbe3SMel Gorman */ 189604f2cbe3SMel Gorman if (!(vma->vm_flags & VM_SHARED) && 189704f2cbe3SMel Gorman is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 189804f2cbe3SMel Gorman old_page != pagecache_page) 189904f2cbe3SMel Gorman outside_reserve = 1; 190004f2cbe3SMel Gorman 19011e8f889bSDavid Gibson page_cache_get(old_page); 190204f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve); 19031e8f889bSDavid Gibson 19042fc39cecSAdam Litke if (IS_ERR(new_page)) { 19051e8f889bSDavid Gibson page_cache_release(old_page); 190604f2cbe3SMel Gorman 190704f2cbe3SMel Gorman /* 190804f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW, 190904f2cbe3SMel Gorman * it is due to references held by a child and an insufficient 191004f2cbe3SMel Gorman * huge page pool. To guarantee the original mappers 191104f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child 191204f2cbe3SMel Gorman * may get SIGKILLed if it later faults. 
191304f2cbe3SMel Gorman */ 191404f2cbe3SMel Gorman if (outside_reserve) { 191504f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 191604f2cbe3SMel Gorman if (unmap_ref_private(mm, vma, old_page, address)) { 191704f2cbe3SMel Gorman BUG_ON(page_count(old_page) != 1); 191804f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 191904f2cbe3SMel Gorman goto retry_avoidcopy; 192004f2cbe3SMel Gorman } 192104f2cbe3SMel Gorman WARN_ON_ONCE(1); 192204f2cbe3SMel Gorman } 192304f2cbe3SMel Gorman 19242fc39cecSAdam Litke return -PTR_ERR(new_page); 19251e8f889bSDavid Gibson } 19261e8f889bSDavid Gibson 19271e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 19289de455b2SAtsushi Nemoto copy_huge_page(new_page, old_page, address, vma); 19290ed361deSNick Piggin __SetPageUptodate(new_page); 19301e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 19311e8f889bSDavid Gibson 1932a5516438SAndi Kleen ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 19337f2e9525SGerald Schaefer if (likely(pte_same(huge_ptep_get(ptep), pte))) { 19341e8f889bSDavid Gibson /* Break COW */ 19358fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep); 19361e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, 19371e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1)); 19381e8f889bSDavid Gibson /* Make the old page be freed below */ 19391e8f889bSDavid Gibson new_page = old_page; 19401e8f889bSDavid Gibson } 19411e8f889bSDavid Gibson page_cache_release(new_page); 19421e8f889bSDavid Gibson page_cache_release(old_page); 194383c54070SNick Piggin return 0; 19441e8f889bSDavid Gibson } 19451e8f889bSDavid Gibson 194604f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */ 1947a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h, 1948a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 194904f2cbe3SMel Gorman { 195004f2cbe3SMel Gorman struct address_space *mapping; 1951e7c4b0bfSAndy Whitcroft pgoff_t idx; 195204f2cbe3SMel Gorman 195304f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 1954a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 195504f2cbe3SMel Gorman 195604f2cbe3SMel Gorman return find_lock_page(mapping, idx); 195704f2cbe3SMel Gorman } 195804f2cbe3SMel Gorman 1959a1ed3ddaSRobert P. J. Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 19601e8f889bSDavid Gibson unsigned long address, pte_t *ptep, int write_access) 1961ac9b9c66SHugh Dickins { 1962a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1963ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 1964e7c4b0bfSAndy Whitcroft pgoff_t idx; 19654c887265SAdam Litke unsigned long size; 19664c887265SAdam Litke struct page *page; 19674c887265SAdam Litke struct address_space *mapping; 19681e8f889bSDavid Gibson pte_t new_pte; 19694c887265SAdam Litke 197004f2cbe3SMel Gorman /* 197104f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the 197204f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 197304f2cbe3SMel Gorman * COW. 
Warn that such a situation has occurred as it may not be obvious 197404f2cbe3SMel Gorman */ 197504f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 197604f2cbe3SMel Gorman printk(KERN_WARNING 197704f2cbe3SMel Gorman "PID %d killed due to inadequate hugepage pool\n", 197804f2cbe3SMel Gorman current->pid); 197904f2cbe3SMel Gorman return ret; 198004f2cbe3SMel Gorman } 198104f2cbe3SMel Gorman 19824c887265SAdam Litke mapping = vma->vm_file->f_mapping; 1983a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 19844c887265SAdam Litke 19854c887265SAdam Litke /* 19864c887265SAdam Litke * Use page lock to guard against racing truncation 19874c887265SAdam Litke * before we get page_table_lock. 19884c887265SAdam Litke */ 19896bda666aSChristoph Lameter retry: 19906bda666aSChristoph Lameter page = find_lock_page(mapping, idx); 19916bda666aSChristoph Lameter if (!page) { 1992a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 1993ebed4bfcSHugh Dickins if (idx >= size) 1994ebed4bfcSHugh Dickins goto out; 199504f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0); 19962fc39cecSAdam Litke if (IS_ERR(page)) { 19972fc39cecSAdam Litke ret = -PTR_ERR(page); 19986bda666aSChristoph Lameter goto out; 19996bda666aSChristoph Lameter } 2000a5516438SAndi Kleen clear_huge_page(page, address, huge_page_size(h)); 20010ed361deSNick Piggin __SetPageUptodate(page); 2002ac9b9c66SHugh Dickins 20036bda666aSChristoph Lameter if (vma->vm_flags & VM_SHARED) { 20046bda666aSChristoph Lameter int err; 200545c682a6SKen Chen struct inode *inode = mapping->host; 20066bda666aSChristoph Lameter 20076bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 20086bda666aSChristoph Lameter if (err) { 20096bda666aSChristoph Lameter put_page(page); 20106bda666aSChristoph Lameter if (err == -EEXIST) 20116bda666aSChristoph Lameter goto retry; 20126bda666aSChristoph Lameter goto out; 20136bda666aSChristoph Lameter } 201445c682a6SKen Chen 201545c682a6SKen Chen spin_lock(&inode->i_lock); 2016a5516438SAndi Kleen inode->i_blocks += blocks_per_huge_page(h); 201745c682a6SKen Chen spin_unlock(&inode->i_lock); 20186bda666aSChristoph Lameter } else 20196bda666aSChristoph Lameter lock_page(page); 20206bda666aSChristoph Lameter } 20211e8f889bSDavid Gibson 202257303d80SAndy Whitcroft /* 202357303d80SAndy Whitcroft * If we are going to COW a private mapping later, we examine the 202457303d80SAndy Whitcroft * pending reservations for this page now. This will ensure that 202557303d80SAndy Whitcroft * any allocations necessary to record that reservation occur outside 202657303d80SAndy Whitcroft * the spinlock.
202757303d80SAndy Whitcroft */ 202857303d80SAndy Whitcroft if (write_access && !(vma->vm_flags & VM_SHARED)) 20292b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 20302b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 20312b26736cSAndy Whitcroft goto backout_unlocked; 20322b26736cSAndy Whitcroft } 203357303d80SAndy Whitcroft 2034ac9b9c66SHugh Dickins spin_lock(&mm->page_table_lock); 2035a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 20364c887265SAdam Litke if (idx >= size) 20374c887265SAdam Litke goto backout; 20384c887265SAdam Litke 203983c54070SNick Piggin ret = 0; 20407f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) 20414c887265SAdam Litke goto backout; 20424c887265SAdam Litke 20431e8f889bSDavid Gibson new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 20441e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED))); 20451e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte); 20461e8f889bSDavid Gibson 20471e8f889bSDavid Gibson if (write_access && !(vma->vm_flags & VM_SHARED)) { 20481e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */ 204904f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 20501e8f889bSDavid Gibson } 20511e8f889bSDavid Gibson 2052ac9b9c66SHugh Dickins spin_unlock(&mm->page_table_lock); 20534c887265SAdam Litke unlock_page(page); 20544c887265SAdam Litke out: 2055ac9b9c66SHugh Dickins return ret; 20564c887265SAdam Litke 20574c887265SAdam Litke backout: 20584c887265SAdam Litke spin_unlock(&mm->page_table_lock); 20592b26736cSAndy Whitcroft backout_unlocked: 20604c887265SAdam Litke unlock_page(page); 20614c887265SAdam Litke put_page(page); 20624c887265SAdam Litke goto out; 2063ac9b9c66SHugh Dickins } 2064ac9b9c66SHugh Dickins 206586e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 206686e5216fSAdam Litke unsigned long address, int write_access) 206786e5216fSAdam Litke { 206886e5216fSAdam Litke pte_t *ptep; 206986e5216fSAdam Litke pte_t entry; 20701e8f889bSDavid Gibson int ret; 207157303d80SAndy Whitcroft struct page *pagecache_page = NULL; 20723935baa9SDavid Gibson static DEFINE_MUTEX(hugetlb_instantiation_mutex); 2073a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 207486e5216fSAdam Litke 2075a5516438SAndi Kleen ptep = huge_pte_alloc(mm, address, huge_page_size(h)); 207686e5216fSAdam Litke if (!ptep) 207786e5216fSAdam Litke return VM_FAULT_OOM; 207886e5216fSAdam Litke 20793935baa9SDavid Gibson /* 20803935baa9SDavid Gibson * Serialize hugepage allocation and instantiation, so that we don't 20813935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate 20823935baa9SDavid Gibson * the same page in the page cache. 20833935baa9SDavid Gibson */ 20843935baa9SDavid Gibson mutex_lock(&hugetlb_instantiation_mutex); 20857f2e9525SGerald Schaefer entry = huge_ptep_get(ptep); 20867f2e9525SGerald Schaefer if (huge_pte_none(entry)) { 20873935baa9SDavid Gibson ret = hugetlb_no_page(mm, vma, address, ptep, write_access); 2088b4d1d99fSDavid Gibson goto out_mutex; 20893935baa9SDavid Gibson } 209086e5216fSAdam Litke 209183c54070SNick Piggin ret = 0; 20921e8f889bSDavid Gibson 209357303d80SAndy Whitcroft /* 209457303d80SAndy Whitcroft * If we are going to COW the mapping later, we examine the pending 209557303d80SAndy Whitcroft * reservations for this page now. 
This will ensure that any 209657303d80SAndy Whitcroft * allocations necessary to record that reservation occur outside the 209757303d80SAndy Whitcroft * spinlock. For private mappings, we also look up the pagecache 209857303d80SAndy Whitcroft * page now as it is used to determine if a reservation has been 209957303d80SAndy Whitcroft * consumed. 210057303d80SAndy Whitcroft */ 210157303d80SAndy Whitcroft if (write_access && !pte_write(entry)) { 21022b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 21032b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 2104b4d1d99fSDavid Gibson goto out_mutex; 21052b26736cSAndy Whitcroft } 210657303d80SAndy Whitcroft 210757303d80SAndy Whitcroft if (!(vma->vm_flags & VM_SHARED)) 210857303d80SAndy Whitcroft pagecache_page = hugetlbfs_pagecache_page(h, 210957303d80SAndy Whitcroft vma, address); 211057303d80SAndy Whitcroft } 211157303d80SAndy Whitcroft 21121e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 21131e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */ 2114b4d1d99fSDavid Gibson if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 2115b4d1d99fSDavid Gibson goto out_page_table_lock; 2116b4d1d99fSDavid Gibson 2117b4d1d99fSDavid Gibson 2118b4d1d99fSDavid Gibson if (write_access) { 2119b4d1d99fSDavid Gibson if (!pte_write(entry)) { 212057303d80SAndy Whitcroft ret = hugetlb_cow(mm, vma, address, ptep, entry, 212157303d80SAndy Whitcroft pagecache_page); 2122b4d1d99fSDavid Gibson goto out_page_table_lock; 2123b4d1d99fSDavid Gibson } 2124b4d1d99fSDavid Gibson entry = pte_mkdirty(entry); 2125b4d1d99fSDavid Gibson } 2126b4d1d99fSDavid Gibson entry = pte_mkyoung(entry); 2127b4d1d99fSDavid Gibson if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access)) 2128b4d1d99fSDavid Gibson update_mmu_cache(vma, address, entry); 2129b4d1d99fSDavid Gibson 2130b4d1d99fSDavid Gibson out_page_table_lock: 21311e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 213257303d80SAndy Whitcroft 213357303d80SAndy Whitcroft if (pagecache_page) { 213457303d80SAndy Whitcroft unlock_page(pagecache_page); 213557303d80SAndy Whitcroft put_page(pagecache_page); 213657303d80SAndy Whitcroft } 213757303d80SAndy Whitcroft 2138b4d1d99fSDavid Gibson out_mutex: 21393935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 21401e8f889bSDavid Gibson 21411e8f889bSDavid Gibson return ret; 214286e5216fSAdam Litke } 214386e5216fSAdam Litke 2144ceb86879SAndi Kleen /* Can be overridden by architectures */ 2145ceb86879SAndi Kleen __attribute__((weak)) struct page * 2146ceb86879SAndi Kleen follow_huge_pud(struct mm_struct *mm, unsigned long address, 2147ceb86879SAndi Kleen pud_t *pud, int write) 2148ceb86879SAndi Kleen { 2149ceb86879SAndi Kleen BUG(); 2150ceb86879SAndi Kleen return NULL; 2151ceb86879SAndi Kleen } 2152ceb86879SAndi Kleen 21534b2e38adSKOSAKI Motohiro static int huge_zeropage_ok(pte_t *ptep, int write, int shared) 21544b2e38adSKOSAKI Motohiro { 21554b2e38adSKOSAKI Motohiro if (!ptep || write || shared) 21564b2e38adSKOSAKI Motohiro return 0; 21574b2e38adSKOSAKI Motohiro else 21584b2e38adSKOSAKI Motohiro return huge_pte_none(huge_ptep_get(ptep)); 21594b2e38adSKOSAKI Motohiro } 21604b2e38adSKOSAKI Motohiro 216163551ae0SDavid Gibson int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 216263551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas, 21635b23dbe8SAdam Litke unsigned long *position, int *length, int i, 21645b23dbe8SAdam Litke int write) 216563551ae0SDavid Gibson { 2166d5d4b0aaSChen,
Kenneth W unsigned long pfn_offset; 2167d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position; 216863551ae0SDavid Gibson int remainder = *length; 2169a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 21704b2e38adSKOSAKI Motohiro int zeropage_ok = 0; 21714b2e38adSKOSAKI Motohiro int shared = vma->vm_flags & VM_SHARED; 217263551ae0SDavid Gibson 21731c59827dSHugh Dickins spin_lock(&mm->page_table_lock); 217463551ae0SDavid Gibson while (vaddr < vma->vm_end && remainder) { 217563551ae0SDavid Gibson pte_t *pte; 217663551ae0SDavid Gibson struct page *page; 217763551ae0SDavid Gibson 21784c887265SAdam Litke /* 21794c887265SAdam Litke * Some archs (sparc64, sh*) have multiple pte_ts to 21804c887265SAdam Litke * each hugepage. We have to make * sure we get the 21814c887265SAdam Litke * first, for the page indexing below to work. 21824c887265SAdam Litke */ 2183a5516438SAndi Kleen pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); 21844b2e38adSKOSAKI Motohiro if (huge_zeropage_ok(pte, write, shared)) 21854b2e38adSKOSAKI Motohiro zeropage_ok = 1; 218663551ae0SDavid Gibson 21874b2e38adSKOSAKI Motohiro if (!pte || 21884b2e38adSKOSAKI Motohiro (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) || 21897f2e9525SGerald Schaefer (write && !pte_write(huge_ptep_get(pte)))) { 21904c887265SAdam Litke int ret; 21914c887265SAdam Litke 21924c887265SAdam Litke spin_unlock(&mm->page_table_lock); 21935b23dbe8SAdam Litke ret = hugetlb_fault(mm, vma, vaddr, write); 21944c887265SAdam Litke spin_lock(&mm->page_table_lock); 2195a89182c7SAdam Litke if (!(ret & VM_FAULT_ERROR)) 21964c887265SAdam Litke continue; 21974c887265SAdam Litke 21981c59827dSHugh Dickins remainder = 0; 21991c59827dSHugh Dickins if (!i) 22001c59827dSHugh Dickins i = -EFAULT; 22011c59827dSHugh Dickins break; 22021c59827dSHugh Dickins } 220363551ae0SDavid Gibson 2204a5516438SAndi Kleen pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 22057f2e9525SGerald Schaefer page = pte_page(huge_ptep_get(pte)); 2206d5d4b0aaSChen, Kenneth W same_page: 2207d6692183SChen, Kenneth W if (pages) { 22084b2e38adSKOSAKI Motohiro if (zeropage_ok) 22094b2e38adSKOSAKI Motohiro pages[i] = ZERO_PAGE(0); 22104b2e38adSKOSAKI Motohiro else 221169d177c2SAndy Whitcroft pages[i] = mem_map_offset(page, pfn_offset); 22124b2e38adSKOSAKI Motohiro get_page(pages[i]); 2213d6692183SChen, Kenneth W } 221463551ae0SDavid Gibson 221563551ae0SDavid Gibson if (vmas) 221663551ae0SDavid Gibson vmas[i] = vma; 221763551ae0SDavid Gibson 221863551ae0SDavid Gibson vaddr += PAGE_SIZE; 2219d5d4b0aaSChen, Kenneth W ++pfn_offset; 222063551ae0SDavid Gibson --remainder; 222163551ae0SDavid Gibson ++i; 2222d5d4b0aaSChen, Kenneth W if (vaddr < vma->vm_end && remainder && 2223a5516438SAndi Kleen pfn_offset < pages_per_huge_page(h)) { 2224d5d4b0aaSChen, Kenneth W /* 2225d5d4b0aaSChen, Kenneth W * We use pfn_offset to avoid touching the pageframes 2226d5d4b0aaSChen, Kenneth W * of this compound page. 
2227d5d4b0aaSChen, Kenneth W */ 2228d5d4b0aaSChen, Kenneth W goto same_page; 2229d5d4b0aaSChen, Kenneth W } 223063551ae0SDavid Gibson } 22311c59827dSHugh Dickins spin_unlock(&mm->page_table_lock); 223263551ae0SDavid Gibson *length = remainder; 223363551ae0SDavid Gibson *position = vaddr; 223463551ae0SDavid Gibson 223563551ae0SDavid Gibson return i; 223663551ae0SDavid Gibson } 22378f860591SZhang, Yanmin 22388f860591SZhang, Yanmin void hugetlb_change_protection(struct vm_area_struct *vma, 22398f860591SZhang, Yanmin unsigned long address, unsigned long end, pgprot_t newprot) 22408f860591SZhang, Yanmin { 22418f860591SZhang, Yanmin struct mm_struct *mm = vma->vm_mm; 22428f860591SZhang, Yanmin unsigned long start = address; 22438f860591SZhang, Yanmin pte_t *ptep; 22448f860591SZhang, Yanmin pte_t pte; 2245a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 22468f860591SZhang, Yanmin 22478f860591SZhang, Yanmin BUG_ON(address >= end); 22488f860591SZhang, Yanmin flush_cache_range(vma, address, end); 22498f860591SZhang, Yanmin 225039dde65cSChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 22518f860591SZhang, Yanmin spin_lock(&mm->page_table_lock); 2252a5516438SAndi Kleen for (; address < end; address += huge_page_size(h)) { 22538f860591SZhang, Yanmin ptep = huge_pte_offset(mm, address); 22548f860591SZhang, Yanmin if (!ptep) 22558f860591SZhang, Yanmin continue; 225639dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 225739dde65cSChen, Kenneth W continue; 22587f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) { 22598f860591SZhang, Yanmin pte = huge_ptep_get_and_clear(mm, address, ptep); 22608f860591SZhang, Yanmin pte = pte_mkhuge(pte_modify(pte, newprot)); 22618f860591SZhang, Yanmin set_huge_pte_at(mm, address, ptep, pte); 22628f860591SZhang, Yanmin } 22638f860591SZhang, Yanmin } 22648f860591SZhang, Yanmin spin_unlock(&mm->page_table_lock); 226539dde65cSChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 22668f860591SZhang, Yanmin 22678f860591SZhang, Yanmin flush_tlb_range(vma, start, end); 22688f860591SZhang, Yanmin } 22698f860591SZhang, Yanmin 2270a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode, 2271a1e78772SMel Gorman long from, long to, 22725a6fe125SMel Gorman struct vm_area_struct *vma, 22735a6fe125SMel Gorman int acctflag) 2274e4e574b7SAdam Litke { 227517c9d12eSMel Gorman long ret, chg; 2276a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 2277e4e574b7SAdam Litke 2278a1e78772SMel Gorman /* 227917c9d12eSMel Gorman * Only apply hugepage reservation if asked. At fault time, an 228017c9d12eSMel Gorman * attempt will be made for VM_NORESERVE to allocate a page 228117c9d12eSMel Gorman * and filesystem quota without using reserves 228217c9d12eSMel Gorman */ 228317c9d12eSMel Gorman if (acctflag & VM_NORESERVE) 228417c9d12eSMel Gorman return 0; 228517c9d12eSMel Gorman 228617c9d12eSMel Gorman /* 2287a1e78772SMel Gorman * Shared mappings base their reservation on the number of pages that 2288a1e78772SMel Gorman * are already allocated on behalf of the file. Private mappings need 2289a1e78772SMel Gorman * to reserve the full area even if read-only as mprotect() may be 2290a1e78772SMel Gorman * called to make the mapping read-write. 
Assume !vma is a shm mapping 2291a1e78772SMel Gorman */ 2292a1e78772SMel Gorman if (!vma || vma->vm_flags & VM_SHARED) 2293e4e574b7SAdam Litke chg = region_chg(&inode->i_mapping->private_list, from, to); 22945a6fe125SMel Gorman else { 22955a6fe125SMel Gorman struct resv_map *resv_map = resv_map_alloc(); 22965a6fe125SMel Gorman if (!resv_map) 22975a6fe125SMel Gorman return -ENOMEM; 22985a6fe125SMel Gorman 229917c9d12eSMel Gorman chg = to - from; 230017c9d12eSMel Gorman 23015a6fe125SMel Gorman set_vma_resv_map(vma, resv_map); 23025a6fe125SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 23035a6fe125SMel Gorman } 23045a6fe125SMel Gorman 230517c9d12eSMel Gorman if (chg < 0) 230617c9d12eSMel Gorman return chg; 230717c9d12eSMel Gorman 230817c9d12eSMel Gorman /* There must be enough filesystem quota for the mapping */ 230917c9d12eSMel Gorman if (hugetlb_get_quota(inode->i_mapping, chg)) 231017c9d12eSMel Gorman return -ENOSPC; 231117c9d12eSMel Gorman 231217c9d12eSMel Gorman /* 231317c9d12eSMel Gorman * Check enough hugepages are available for the reservation. 231417c9d12eSMel Gorman * Hand back the quota if there are not 231517c9d12eSMel Gorman */ 231617c9d12eSMel Gorman ret = hugetlb_acct_memory(h, chg); 231717c9d12eSMel Gorman if (ret < 0) { 231817c9d12eSMel Gorman hugetlb_put_quota(inode->i_mapping, chg); 231917c9d12eSMel Gorman return ret; 232017c9d12eSMel Gorman } 232117c9d12eSMel Gorman 232217c9d12eSMel Gorman /* 232317c9d12eSMel Gorman * Account for the reservations made. Shared mappings record regions 232417c9d12eSMel Gorman * that have reservations as they are shared by multiple VMAs. 232517c9d12eSMel Gorman * When the last VMA disappears, the region map says how much 232617c9d12eSMel Gorman * the reservation was and the page cache tells how much of 232717c9d12eSMel Gorman * the reservation was consumed. Private mappings are per-VMA and 232817c9d12eSMel Gorman * only the consumed reservations are tracked. When the VMA 232917c9d12eSMel Gorman * disappears, the original reservation is the VMA size and the 233017c9d12eSMel Gorman * consumed reservations are stored in the map. Hence, nothing 233117c9d12eSMel Gorman * else has to be done for private mappings here 233217c9d12eSMel Gorman */ 233317c9d12eSMel Gorman if (!vma || vma->vm_flags & VM_SHARED) 233417c9d12eSMel Gorman region_add(&inode->i_mapping->private_list, from, to); 2335a43a8c39SChen, Kenneth W return 0; 2336a43a8c39SChen, Kenneth W } 2337a43a8c39SChen, Kenneth W 2338a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 2339a43a8c39SChen, Kenneth W { 2340a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 2341a43a8c39SChen, Kenneth W long chg = region_truncate(&inode->i_mapping->private_list, offset); 234245c682a6SKen Chen 234345c682a6SKen Chen spin_lock(&inode->i_lock); 2344a5516438SAndi Kleen inode->i_blocks -= blocks_per_huge_page(h); 234545c682a6SKen Chen spin_unlock(&inode->i_lock); 234645c682a6SKen Chen 234790d8b7e6SAdam Litke hugetlb_put_quota(inode->i_mapping, (chg - freed)); 2348a5516438SAndi Kleen hugetlb_acct_memory(h, -(chg - freed)); 2349a43a8c39SChen, Kenneth W } 2350
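/* For reference, the user-visible knobs implemented in this file (the sysctl table and sysfs hierarchy are registered elsewhere, so the exact paths may vary): the hugepages=, hugepagesz= and default_hugepagesz= boot options; the nr_hugepages and nr_overcommit_hugepages sysctls, typically reachable as /proc/sys/vm/nr_hugepages; per-size sysfs attributes such as /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages for a 2 MB hstate; and the HugePages_* statistics emitted into /proc/meminfo by hugetlb_report_meminfo(). */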