/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.
 * To access or modify a region the caller
 * must either hold the mmap_sem for write, or the mmap_sem for read and
 * the hugetlb_instantiation mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation.
	 */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap.
	 */
	list_for_each_entry(rg, head, link) {
		int seg_from;
		int seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_SHARED) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void clear_huge_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct hstate *h)
{
	int nid;
	struct page *page = NULL;

	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
		if (!list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	struct zone *zone;
	struct zoneref *z;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;

			if (!avoid_reserve)
				decrement_hugepage_resv_vma(h, vma);

			break;
		}
	}
	mpol_cond_put(mpol);
	return page;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
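	/*
	 * Detach the hugetlb destructor and hand the page back to the
	 * buddy allocator.
	 */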
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !h->surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && h->surplus_huge_pages_node[nid] >=
						h->nr_huge_pages_node[nid])
			continue;

		h->surplus_huge_pages += delta;
		h->surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * Use a helper variable to find the next node and then
 * copy it back to hugetlb_next_nid afterwards:
 * otherwise there's a window in which a racer might
 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
 * But we don't need to use a spin_lock here: it really
 * doesn't matter if occasionally a racer chooses the
 * same nid as we do.
 * Move nid forward in the mask even
 * if we just successfully allocated a hugepage so that
 * the next caller gets hugepages on the next node.
 */
static int hstate_next_node(struct hstate *h)
{
	int next_nid;
	next_nid = next_node(h->hugetlb_next_nid, node_online_map);
	if (next_nid == MAX_NUMNODES)
		next_nid = first_node(node_online_map);
	h->hugetlb_next_nid = next_nid;
	return next_nid;
}

static int alloc_fresh_huge_page(struct hstate *h)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = h->hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
		if (page)
			ret = 1;
		next_nid = hstate_next_node(h);
	} while (!page && h->hugetlb_next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *page;
	unsigned int nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again).
	 * Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
					__GFP_REPEAT|__GFP_NOWARN,
					huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		return NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[nid]++;
		h->surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(h, page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes. Iterate across all nodes until we
	 * can no longer free unreserved surplus pages. This occurs when
	 * the nodes with surplus pages have no free pages.
	 */
	unsigned long remaining_iterations = num_online_nodes();

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	while (remaining_iterations-- && nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!h->surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;
			h->surplus_huge_pages--;
			h->surplus_huge_pages_node[nid]--;
			nr_pages--;
			remaining_iterations = num_online_nodes();
		}
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * reservation and actually increase quota before an allocation can occur.
 * Where any new reservation would be required the reservation change is
 * prepared, but not committed.  Once the page has been quota'd, allocated
 * and instantiated the change should be committed via vma_commit_reservation.
 * No action is required on failure.
 */
static int vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		int err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}
static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	long chg;

	/*
	 * Processes that did not create the mapping will have no reserves and
	 * will not have accounted against quota. Check that the quota can be
	 * made before satisfying the allocation.
	 * MAP_NORESERVE mappings may also need pages and quota allocated
	 * if no reserve mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(chg);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-ENOSPC);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, vma, addr);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}

	set_page_refcounted(page);
	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(h, vma, addr);

	return page;
}

__attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_online_map);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(h->hugetlb_next_nid),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			if (m)
				goto found;
		}
		hstate_next_node(h);
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		printk(KERN_INFO "HugeTLB registered %s page size, "
				 "pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}
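
/*
 * When shrinking the pool on a CONFIG_HIGHMEM system, prefer to release
 * huge pages backed by low memory; pages resident in highmem are skipped.
 */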
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count)
{
}
#endif

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page(h);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls is changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count);
	while (min_count < persistent_huge_pages(h)) {
		struct page *page = dequeue_huge_page(h);
		if (!page)
			break;
		update_and_free_page(h, page);
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_hstate(struct kobject *kobj)
{
	int i;
	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj)
			return &hstates[i];
	BUG();
	return NULL;
}
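
/*
 * sysfs show/store handlers for the per-hstate attributes (nr_hugepages,
 * nr_overcommit_hugepages, free_hugepages, resv_hugepages and
 * surplus_hugepages) registered by hugetlb_sysfs_init() below.
 */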
ssize_t nr_hugepages_show(struct kobject *kobj, 1159a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1160a3437870SNishanth Aravamudan { 1161a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1162a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_huge_pages); 1163a3437870SNishanth Aravamudan } 1164a3437870SNishanth Aravamudan static ssize_t nr_hugepages_store(struct kobject *kobj, 1165a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1166a3437870SNishanth Aravamudan { 1167a3437870SNishanth Aravamudan int err; 1168a3437870SNishanth Aravamudan unsigned long input; 1169a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1170a3437870SNishanth Aravamudan 1171a3437870SNishanth Aravamudan err = strict_strtoul(buf, 10, &input); 1172a3437870SNishanth Aravamudan if (err) 1173a3437870SNishanth Aravamudan return 0; 1174a3437870SNishanth Aravamudan 1175a3437870SNishanth Aravamudan h->max_huge_pages = set_max_huge_pages(h, input); 1176a3437870SNishanth Aravamudan 1177a3437870SNishanth Aravamudan return count; 1178a3437870SNishanth Aravamudan } 1179a3437870SNishanth Aravamudan HSTATE_ATTR(nr_hugepages); 1180a3437870SNishanth Aravamudan 1181a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 1182a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1183a3437870SNishanth Aravamudan { 1184a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1185a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); 1186a3437870SNishanth Aravamudan } 1187a3437870SNishanth Aravamudan static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 1188a3437870SNishanth Aravamudan struct kobj_attribute *attr, const char *buf, size_t count) 1189a3437870SNishanth Aravamudan { 1190a3437870SNishanth Aravamudan int err; 1191a3437870SNishanth Aravamudan unsigned long input; 1192a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1193a3437870SNishanth Aravamudan 1194a3437870SNishanth Aravamudan err = strict_strtoul(buf, 10, &input); 1195a3437870SNishanth Aravamudan if (err) 1196a3437870SNishanth Aravamudan return 0; 1197a3437870SNishanth Aravamudan 1198a3437870SNishanth Aravamudan spin_lock(&hugetlb_lock); 1199a3437870SNishanth Aravamudan h->nr_overcommit_huge_pages = input; 1200a3437870SNishanth Aravamudan spin_unlock(&hugetlb_lock); 1201a3437870SNishanth Aravamudan 1202a3437870SNishanth Aravamudan return count; 1203a3437870SNishanth Aravamudan } 1204a3437870SNishanth Aravamudan HSTATE_ATTR(nr_overcommit_hugepages); 1205a3437870SNishanth Aravamudan 1206a3437870SNishanth Aravamudan static ssize_t free_hugepages_show(struct kobject *kobj, 1207a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1208a3437870SNishanth Aravamudan { 1209a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1210a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->free_huge_pages); 1211a3437870SNishanth Aravamudan } 1212a3437870SNishanth Aravamudan HSTATE_ATTR_RO(free_hugepages); 1213a3437870SNishanth Aravamudan 1214a3437870SNishanth Aravamudan static ssize_t resv_hugepages_show(struct kobject *kobj, 1215a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1216a3437870SNishanth Aravamudan { 1217a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1218a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->resv_huge_pages); 1219a3437870SNishanth 
Aravamudan } 1220a3437870SNishanth Aravamudan HSTATE_ATTR_RO(resv_hugepages); 1221a3437870SNishanth Aravamudan 1222a3437870SNishanth Aravamudan static ssize_t surplus_hugepages_show(struct kobject *kobj, 1223a3437870SNishanth Aravamudan struct kobj_attribute *attr, char *buf) 1224a3437870SNishanth Aravamudan { 1225a3437870SNishanth Aravamudan struct hstate *h = kobj_to_hstate(kobj); 1226a3437870SNishanth Aravamudan return sprintf(buf, "%lu\n", h->surplus_huge_pages); 1227a3437870SNishanth Aravamudan } 1228a3437870SNishanth Aravamudan HSTATE_ATTR_RO(surplus_hugepages); 1229a3437870SNishanth Aravamudan 1230a3437870SNishanth Aravamudan static struct attribute *hstate_attrs[] = { 1231a3437870SNishanth Aravamudan &nr_hugepages_attr.attr, 1232a3437870SNishanth Aravamudan &nr_overcommit_hugepages_attr.attr, 1233a3437870SNishanth Aravamudan &free_hugepages_attr.attr, 1234a3437870SNishanth Aravamudan &resv_hugepages_attr.attr, 1235a3437870SNishanth Aravamudan &surplus_hugepages_attr.attr, 1236a3437870SNishanth Aravamudan NULL, 1237a3437870SNishanth Aravamudan }; 1238a3437870SNishanth Aravamudan 1239a3437870SNishanth Aravamudan static struct attribute_group hstate_attr_group = { 1240a3437870SNishanth Aravamudan .attrs = hstate_attrs, 1241a3437870SNishanth Aravamudan }; 1242a3437870SNishanth Aravamudan 1243a3437870SNishanth Aravamudan static int __init hugetlb_sysfs_add_hstate(struct hstate *h) 1244a3437870SNishanth Aravamudan { 1245a3437870SNishanth Aravamudan int retval; 1246a3437870SNishanth Aravamudan 1247a3437870SNishanth Aravamudan hstate_kobjs[h - hstates] = kobject_create_and_add(h->name, 1248a3437870SNishanth Aravamudan hugepages_kobj); 1249a3437870SNishanth Aravamudan if (!hstate_kobjs[h - hstates]) 1250a3437870SNishanth Aravamudan return -ENOMEM; 1251a3437870SNishanth Aravamudan 1252a3437870SNishanth Aravamudan retval = sysfs_create_group(hstate_kobjs[h - hstates], 1253a3437870SNishanth Aravamudan &hstate_attr_group); 1254a3437870SNishanth Aravamudan if (retval) 1255a3437870SNishanth Aravamudan kobject_put(hstate_kobjs[h - hstates]); 1256a3437870SNishanth Aravamudan 1257a3437870SNishanth Aravamudan return retval; 1258a3437870SNishanth Aravamudan } 1259a3437870SNishanth Aravamudan 1260a3437870SNishanth Aravamudan static void __init hugetlb_sysfs_init(void) 1261a3437870SNishanth Aravamudan { 1262a3437870SNishanth Aravamudan struct hstate *h; 1263a3437870SNishanth Aravamudan int err; 1264a3437870SNishanth Aravamudan 1265a3437870SNishanth Aravamudan hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 1266a3437870SNishanth Aravamudan if (!hugepages_kobj) 1267a3437870SNishanth Aravamudan return; 1268a3437870SNishanth Aravamudan 1269a3437870SNishanth Aravamudan for_each_hstate(h) { 1270a3437870SNishanth Aravamudan err = hugetlb_sysfs_add_hstate(h); 1271a3437870SNishanth Aravamudan if (err) 1272a3437870SNishanth Aravamudan printk(KERN_ERR "Hugetlb: Unable to add hstate %s", 1273a3437870SNishanth Aravamudan h->name); 1274a3437870SNishanth Aravamudan } 1275a3437870SNishanth Aravamudan } 1276a3437870SNishanth Aravamudan 1277a3437870SNishanth Aravamudan static void __exit hugetlb_exit(void) 1278a3437870SNishanth Aravamudan { 1279a3437870SNishanth Aravamudan struct hstate *h; 1280a3437870SNishanth Aravamudan 1281a3437870SNishanth Aravamudan for_each_hstate(h) { 1282a3437870SNishanth Aravamudan kobject_put(hstate_kobjs[h - hstates]); 1283a3437870SNishanth Aravamudan } 1284a3437870SNishanth Aravamudan 1285a3437870SNishanth Aravamudan kobject_put(hugepages_kobj); 1286a3437870SNishanth 
Aravamudan } 1287a3437870SNishanth Aravamudan module_exit(hugetlb_exit); 1288a3437870SNishanth Aravamudan 1289a3437870SNishanth Aravamudan static int __init hugetlb_init(void) 1290a3437870SNishanth Aravamudan { 12910ef89d25SBenjamin Herrenschmidt /* Some platforms decide whether they support huge pages at boot 12920ef89d25SBenjamin Herrenschmidt * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when 12930ef89d25SBenjamin Herrenschmidt * there is no such support 12940ef89d25SBenjamin Herrenschmidt */ 12950ef89d25SBenjamin Herrenschmidt if (HPAGE_SHIFT == 0) 12960ef89d25SBenjamin Herrenschmidt return 0; 1297a3437870SNishanth Aravamudan 1298e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) { 1299e11bfbfcSNick Piggin default_hstate_size = HPAGE_SIZE; 1300e11bfbfcSNick Piggin if (!size_to_hstate(default_hstate_size)) 1301a3437870SNishanth Aravamudan hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 1302a3437870SNishanth Aravamudan } 1303e11bfbfcSNick Piggin default_hstate_idx = size_to_hstate(default_hstate_size) - hstates; 1304e11bfbfcSNick Piggin if (default_hstate_max_huge_pages) 1305e11bfbfcSNick Piggin default_hstate.max_huge_pages = default_hstate_max_huge_pages; 1306a3437870SNishanth Aravamudan 1307a3437870SNishanth Aravamudan hugetlb_init_hstates(); 1308a3437870SNishanth Aravamudan 1309aa888a74SAndi Kleen gather_bootmem_prealloc(); 1310aa888a74SAndi Kleen 1311a3437870SNishanth Aravamudan report_hugepages(); 1312a3437870SNishanth Aravamudan 1313a3437870SNishanth Aravamudan hugetlb_sysfs_init(); 1314a3437870SNishanth Aravamudan 1315a3437870SNishanth Aravamudan return 0; 1316a3437870SNishanth Aravamudan } 1317a3437870SNishanth Aravamudan module_init(hugetlb_init); 1318a3437870SNishanth Aravamudan 1319a3437870SNishanth Aravamudan /* Should be called on processing a hugepagesz=...
option */ 1320a3437870SNishanth Aravamudan void __init hugetlb_add_hstate(unsigned order) 1321a3437870SNishanth Aravamudan { 1322a3437870SNishanth Aravamudan struct hstate *h; 13238faa8b07SAndi Kleen unsigned long i; 13248faa8b07SAndi Kleen 1325a3437870SNishanth Aravamudan if (size_to_hstate(PAGE_SIZE << order)) { 1326a3437870SNishanth Aravamudan printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n"); 1327a3437870SNishanth Aravamudan return; 1328a3437870SNishanth Aravamudan } 1329a3437870SNishanth Aravamudan BUG_ON(max_hstate >= HUGE_MAX_HSTATE); 1330a3437870SNishanth Aravamudan BUG_ON(order == 0); 1331a3437870SNishanth Aravamudan h = &hstates[max_hstate++]; 1332a3437870SNishanth Aravamudan h->order = order; 1333a3437870SNishanth Aravamudan h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 13348faa8b07SAndi Kleen h->nr_huge_pages = 0; 13358faa8b07SAndi Kleen h->free_huge_pages = 0; 13368faa8b07SAndi Kleen for (i = 0; i < MAX_NUMNODES; ++i) 13378faa8b07SAndi Kleen INIT_LIST_HEAD(&h->hugepage_freelists[i]); 13388faa8b07SAndi Kleen h->hugetlb_next_nid = first_node(node_online_map); 1339a3437870SNishanth Aravamudan snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 1340a3437870SNishanth Aravamudan huge_page_size(h)/1024); 13418faa8b07SAndi Kleen 1342a3437870SNishanth Aravamudan parsed_hstate = h; 1343a3437870SNishanth Aravamudan } 1344a3437870SNishanth Aravamudan 1345e11bfbfcSNick Piggin static int __init hugetlb_nrpages_setup(char *s) 1346a3437870SNishanth Aravamudan { 1347a3437870SNishanth Aravamudan unsigned long *mhp; 13488faa8b07SAndi Kleen static unsigned long *last_mhp; 1349a3437870SNishanth Aravamudan 1350a3437870SNishanth Aravamudan /* 1351a3437870SNishanth Aravamudan * !max_hstate means we haven't parsed a hugepagesz= parameter yet, 1352a3437870SNishanth Aravamudan * so this hugepages= parameter goes to the "default hstate". 1353a3437870SNishanth Aravamudan */ 1354a3437870SNishanth Aravamudan if (!max_hstate) 1355a3437870SNishanth Aravamudan mhp = &default_hstate_max_huge_pages; 1356a3437870SNishanth Aravamudan else 1357a3437870SNishanth Aravamudan mhp = &parsed_hstate->max_huge_pages; 1358a3437870SNishanth Aravamudan 13598faa8b07SAndi Kleen if (mhp == last_mhp) { 13608faa8b07SAndi Kleen printk(KERN_WARNING "hugepages= specified twice without " 13618faa8b07SAndi Kleen "interleaving hugepagesz=, ignoring\n"); 13628faa8b07SAndi Kleen return 1; 13638faa8b07SAndi Kleen } 13648faa8b07SAndi Kleen 1365a3437870SNishanth Aravamudan if (sscanf(s, "%lu", mhp) <= 0) 1366a3437870SNishanth Aravamudan *mhp = 0; 1367a3437870SNishanth Aravamudan 13688faa8b07SAndi Kleen /* 13698faa8b07SAndi Kleen * Global state is always initialized later in hugetlb_init. 13708faa8b07SAndi Kleen * But we need to allocate >= MAX_ORDER hstates here early to still 13718faa8b07SAndi Kleen * use the bootmem allocator. 
13728faa8b07SAndi Kleen */ 13738faa8b07SAndi Kleen if (max_hstate && parsed_hstate->order >= MAX_ORDER) 13748faa8b07SAndi Kleen hugetlb_hstate_alloc_pages(parsed_hstate); 13758faa8b07SAndi Kleen 13768faa8b07SAndi Kleen last_mhp = mhp; 13778faa8b07SAndi Kleen 1378a3437870SNishanth Aravamudan return 1; 1379a3437870SNishanth Aravamudan } 1380e11bfbfcSNick Piggin __setup("hugepages=", hugetlb_nrpages_setup); 1381e11bfbfcSNick Piggin 1382e11bfbfcSNick Piggin static int __init hugetlb_default_setup(char *s) 1383e11bfbfcSNick Piggin { 1384e11bfbfcSNick Piggin default_hstate_size = memparse(s, &s); 1385e11bfbfcSNick Piggin return 1; 1386e11bfbfcSNick Piggin } 1387e11bfbfcSNick Piggin __setup("default_hugepagesz=", hugetlb_default_setup); 1388a3437870SNishanth Aravamudan 13898a213460SNishanth Aravamudan static unsigned int cpuset_mems_nr(unsigned int *array) 13908a213460SNishanth Aravamudan { 13918a213460SNishanth Aravamudan int node; 13928a213460SNishanth Aravamudan unsigned int nr = 0; 13938a213460SNishanth Aravamudan 13948a213460SNishanth Aravamudan for_each_node_mask(node, cpuset_current_mems_allowed) 13958a213460SNishanth Aravamudan nr += array[node]; 13968a213460SNishanth Aravamudan 13978a213460SNishanth Aravamudan return nr; 13988a213460SNishanth Aravamudan } 13998a213460SNishanth Aravamudan 14008a213460SNishanth Aravamudan #ifdef CONFIG_SYSCTL 14011da177e4SLinus Torvalds int hugetlb_sysctl_handler(struct ctl_table *table, int write, 14021da177e4SLinus Torvalds struct file *file, void __user *buffer, 14031da177e4SLinus Torvalds size_t *length, loff_t *ppos) 14041da177e4SLinus Torvalds { 1405e5ff2159SAndi Kleen struct hstate *h = &default_hstate; 1406e5ff2159SAndi Kleen unsigned long tmp; 1407e5ff2159SAndi Kleen 1408e5ff2159SAndi Kleen if (!write) 1409e5ff2159SAndi Kleen tmp = h->max_huge_pages; 1410e5ff2159SAndi Kleen 1411e5ff2159SAndi Kleen table->data = &tmp; 1412e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 14131da177e4SLinus Torvalds proc_doulongvec_minmax(table, write, file, buffer, length, ppos); 1414e5ff2159SAndi Kleen 1415e5ff2159SAndi Kleen if (write) 1416e5ff2159SAndi Kleen h->max_huge_pages = set_max_huge_pages(h, tmp); 1417e5ff2159SAndi Kleen 14181da177e4SLinus Torvalds return 0; 14191da177e4SLinus Torvalds } 1420396faf03SMel Gorman 1421396faf03SMel Gorman int hugetlb_treat_movable_handler(struct ctl_table *table, int write, 1422396faf03SMel Gorman struct file *file, void __user *buffer, 1423396faf03SMel Gorman size_t *length, loff_t *ppos) 1424396faf03SMel Gorman { 1425396faf03SMel Gorman proc_dointvec(table, write, file, buffer, length, ppos); 1426396faf03SMel Gorman if (hugepages_treat_as_movable) 1427396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER_MOVABLE; 1428396faf03SMel Gorman else 1429396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER; 1430396faf03SMel Gorman return 0; 1431396faf03SMel Gorman } 1432396faf03SMel Gorman 1433a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write, 1434a3d0c6aaSNishanth Aravamudan struct file *file, void __user *buffer, 1435a3d0c6aaSNishanth Aravamudan size_t *length, loff_t *ppos) 1436a3d0c6aaSNishanth Aravamudan { 1437a5516438SAndi Kleen struct hstate *h = &default_hstate; 1438e5ff2159SAndi Kleen unsigned long tmp; 1439e5ff2159SAndi Kleen 1440e5ff2159SAndi Kleen if (!write) 1441e5ff2159SAndi Kleen tmp = h->nr_overcommit_huge_pages; 1442e5ff2159SAndi Kleen 1443e5ff2159SAndi Kleen table->data = &tmp; 1444e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 1445a3d0c6aaSNishanth 
Aravamudan proc_doulongvec_minmax(table, write, file, buffer, length, ppos); 1446e5ff2159SAndi Kleen 1447e5ff2159SAndi Kleen if (write) { 1448064d9efeSNishanth Aravamudan spin_lock(&hugetlb_lock); 1449e5ff2159SAndi Kleen h->nr_overcommit_huge_pages = tmp; 1450a3d0c6aaSNishanth Aravamudan spin_unlock(&hugetlb_lock); 1451e5ff2159SAndi Kleen } 1452e5ff2159SAndi Kleen 1453a3d0c6aaSNishanth Aravamudan return 0; 1454a3d0c6aaSNishanth Aravamudan } 1455a3d0c6aaSNishanth Aravamudan 14561da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 14571da177e4SLinus Torvalds 14581da177e4SLinus Torvalds int hugetlb_report_meminfo(char *buf) 14591da177e4SLinus Torvalds { 1460a5516438SAndi Kleen struct hstate *h = &default_hstate; 14611da177e4SLinus Torvalds return sprintf(buf, 14621da177e4SLinus Torvalds "HugePages_Total: %5lu\n" 14631da177e4SLinus Torvalds "HugePages_Free: %5lu\n" 1464b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n" 14657893d1d5SAdam Litke "HugePages_Surp: %5lu\n" 14664f98a2feSRik van Riel "Hugepagesize: %8lu kB\n", 1467a5516438SAndi Kleen h->nr_huge_pages, 1468a5516438SAndi Kleen h->free_huge_pages, 1469a5516438SAndi Kleen h->resv_huge_pages, 1470a5516438SAndi Kleen h->surplus_huge_pages, 1471a5516438SAndi Kleen 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 14721da177e4SLinus Torvalds } 14731da177e4SLinus Torvalds 14741da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf) 14751da177e4SLinus Torvalds { 1476a5516438SAndi Kleen struct hstate *h = &default_hstate; 14771da177e4SLinus Torvalds return sprintf(buf, 14781da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n" 1479a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n" 1480a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n", 1481a5516438SAndi Kleen nid, h->nr_huge_pages_node[nid], 1482a5516438SAndi Kleen nid, h->free_huge_pages_node[nid], 1483a5516438SAndi Kleen nid, h->surplus_huge_pages_node[nid]); 14841da177e4SLinus Torvalds } 14851da177e4SLinus Torvalds 14861da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */ 14871da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void) 14881da177e4SLinus Torvalds { 1489a5516438SAndi Kleen struct hstate *h = &default_hstate; 1490a5516438SAndi Kleen return h->nr_huge_pages * pages_per_huge_page(h); 14911da177e4SLinus Torvalds } 14921da177e4SLinus Torvalds 1493a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta) 1494fc1b8a73SMel Gorman { 1495fc1b8a73SMel Gorman int ret = -ENOMEM; 1496fc1b8a73SMel Gorman 1497fc1b8a73SMel Gorman spin_lock(&hugetlb_lock); 1498fc1b8a73SMel Gorman /* 1499fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page 1500fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such 1501fc1b8a73SMel Gorman * reservation is completely rubbish in the presence of cpuset because 1502fc1b8a73SMel Gorman * the reservation is not checked against page availability for the 1503fc1b8a73SMel Gorman * current cpuset. An application can still potentially be OOM'ed by the kernel 1504fc1b8a73SMel Gorman * for lack of free htlb pages in the cpuset that the task is in. 1505fc1b8a73SMel Gorman * Attempting to enforce strict accounting with cpuset is almost 1506fc1b8a73SMel Gorman * impossible (or too ugly) because cpuset is so fluid that 1507fc1b8a73SMel Gorman * tasks or memory nodes can be dynamically moved between cpusets.
1508fc1b8a73SMel Gorman * 1509fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mapping with cpuset is 1510fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics, 1511fc1b8a73SMel Gorman * we fall back to check against current free page availability as 1512fc1b8a73SMel Gorman * a best attempt and hopefully to minimize the impact of changing 1513fc1b8a73SMel Gorman * semantics that cpuset has. 1514fc1b8a73SMel Gorman */ 1515fc1b8a73SMel Gorman if (delta > 0) { 1516a5516438SAndi Kleen if (gather_surplus_pages(h, delta) < 0) 1517fc1b8a73SMel Gorman goto out; 1518fc1b8a73SMel Gorman 1519a5516438SAndi Kleen if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { 1520a5516438SAndi Kleen return_unused_surplus_pages(h, delta); 1521fc1b8a73SMel Gorman goto out; 1522fc1b8a73SMel Gorman } 1523fc1b8a73SMel Gorman } 1524fc1b8a73SMel Gorman 1525fc1b8a73SMel Gorman ret = 0; 1526fc1b8a73SMel Gorman if (delta < 0) 1527a5516438SAndi Kleen return_unused_surplus_pages(h, (unsigned long) -delta); 1528fc1b8a73SMel Gorman 1529fc1b8a73SMel Gorman out: 1530fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock); 1531fc1b8a73SMel Gorman return ret; 1532fc1b8a73SMel Gorman } 1533fc1b8a73SMel Gorman 153484afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma) 153584afd99bSAndy Whitcroft { 153684afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 153784afd99bSAndy Whitcroft 153884afd99bSAndy Whitcroft /* 153984afd99bSAndy Whitcroft * This new VMA should share its sibling's reservation map if present. 154084afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where 154184afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA 154284afd99bSAndy Whitcroft * has a reference to the reservation map it cannot disappear until 154384afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a 154484afd99bSAndy Whitcroft * new reference here without additional locking. 154584afd99bSAndy Whitcroft */ 154684afd99bSAndy Whitcroft if (reservations) 154784afd99bSAndy Whitcroft kref_get(&reservations->refs); 154884afd99bSAndy Whitcroft } 154984afd99bSAndy Whitcroft 1550a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma) 1551a1e78772SMel Gorman { 1552a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 155384afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 155484afd99bSAndy Whitcroft unsigned long reserve; 155584afd99bSAndy Whitcroft unsigned long start; 155684afd99bSAndy Whitcroft unsigned long end; 155784afd99bSAndy Whitcroft 155884afd99bSAndy Whitcroft if (reservations) { 1559a5516438SAndi Kleen start = vma_hugecache_offset(h, vma, vma->vm_start); 1560a5516438SAndi Kleen end = vma_hugecache_offset(h, vma, vma->vm_end); 156184afd99bSAndy Whitcroft 156284afd99bSAndy Whitcroft reserve = (end - start) - 156384afd99bSAndy Whitcroft region_count(&reservations->regions, start, end); 156484afd99bSAndy Whitcroft 156584afd99bSAndy Whitcroft kref_put(&reservations->refs, resv_map_release); 156684afd99bSAndy Whitcroft 15677251ff78SAdam Litke if (reserve) { 1568a5516438SAndi Kleen hugetlb_acct_memory(h, -reserve); 15697251ff78SAdam Litke hugetlb_put_quota(vma->vm_file->f_mapping, reserve); 15707251ff78SAdam Litke } 1571a1e78772SMel Gorman } 157284afd99bSAndy Whitcroft } 1573a1e78772SMel Gorman 15741da177e4SLinus Torvalds /* 15751da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all.
They cause 15761da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the 15771da177e4SLinus Torvalds * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 15781da177e4SLinus Torvalds * this far. 15791da177e4SLinus Torvalds */ 1580d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 15811da177e4SLinus Torvalds { 15821da177e4SLinus Torvalds BUG(); 1583d0217ac0SNick Piggin return 0; 15841da177e4SLinus Torvalds } 15851da177e4SLinus Torvalds 15861da177e4SLinus Torvalds struct vm_operations_struct hugetlb_vm_ops = { 1587d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 158884afd99bSAndy Whitcroft .open = hugetlb_vm_op_open, 1589a1e78772SMel Gorman .close = hugetlb_vm_op_close, 15901da177e4SLinus Torvalds }; 15911da177e4SLinus Torvalds 15921e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 15931e8f889bSDavid Gibson int writable) 159463551ae0SDavid Gibson { 159563551ae0SDavid Gibson pte_t entry; 159663551ae0SDavid Gibson 15971e8f889bSDavid Gibson if (writable) { 159863551ae0SDavid Gibson entry = 159963551ae0SDavid Gibson pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); 160063551ae0SDavid Gibson } else { 16017f2e9525SGerald Schaefer entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot)); 160263551ae0SDavid Gibson } 160363551ae0SDavid Gibson entry = pte_mkyoung(entry); 160463551ae0SDavid Gibson entry = pte_mkhuge(entry); 160563551ae0SDavid Gibson 160663551ae0SDavid Gibson return entry; 160763551ae0SDavid Gibson } 160863551ae0SDavid Gibson 16091e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 16101e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 16111e8f889bSDavid Gibson { 16121e8f889bSDavid Gibson pte_t entry; 16131e8f889bSDavid Gibson 16147f2e9525SGerald Schaefer entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); 16157f2e9525SGerald Schaefer if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) { 16161e8f889bSDavid Gibson update_mmu_cache(vma, address, entry); 16171e8f889bSDavid Gibson } 16188dab5241SBenjamin Herrenschmidt } 16191e8f889bSDavid Gibson 16201e8f889bSDavid Gibson 162163551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 162263551ae0SDavid Gibson struct vm_area_struct *vma) 162363551ae0SDavid Gibson { 162463551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 162563551ae0SDavid Gibson struct page *ptepage; 16261c59827dSHugh Dickins unsigned long addr; 16271e8f889bSDavid Gibson int cow; 1628a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1629a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 16301e8f889bSDavid Gibson 16311e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 163263551ae0SDavid Gibson 1633a5516438SAndi Kleen for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 1634c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 1635c74df32cSHugh Dickins if (!src_pte) 1636c74df32cSHugh Dickins continue; 1637a5516438SAndi Kleen dst_pte = huge_pte_alloc(dst, addr, sz); 163863551ae0SDavid Gibson if (!dst_pte) 163963551ae0SDavid Gibson goto nomem; 1640c5c99429SLarry Woodman 1641c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 1642c5c99429SLarry Woodman if (dst_pte == src_pte) 1643c5c99429SLarry Woodman continue; 1644c5c99429SLarry Woodman 1645c74df32cSHugh Dickins spin_lock(&dst->page_table_lock); 164646478758SNick Piggin
spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); 16477f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(src_pte))) { 16481e8f889bSDavid Gibson if (cow) 16497f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte); 16507f2e9525SGerald Schaefer entry = huge_ptep_get(src_pte); 165163551ae0SDavid Gibson ptepage = pte_page(entry); 165263551ae0SDavid Gibson get_page(ptepage); 165363551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 16541c59827dSHugh Dickins } 16551c59827dSHugh Dickins spin_unlock(&src->page_table_lock); 1656c74df32cSHugh Dickins spin_unlock(&dst->page_table_lock); 165763551ae0SDavid Gibson } 165863551ae0SDavid Gibson return 0; 165963551ae0SDavid Gibson 166063551ae0SDavid Gibson nomem: 166163551ae0SDavid Gibson return -ENOMEM; 166263551ae0SDavid Gibson } 166363551ae0SDavid Gibson 1664502717f4SChen, Kenneth W void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 166504f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 166663551ae0SDavid Gibson { 166763551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 166863551ae0SDavid Gibson unsigned long address; 1669c7546f8fSDavid Gibson pte_t *ptep; 167063551ae0SDavid Gibson pte_t pte; 167163551ae0SDavid Gibson struct page *page; 1672fe1668aeSChen, Kenneth W struct page *tmp; 1673a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1674a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 1675a5516438SAndi Kleen 1676c0a499c2SChen, Kenneth W /* 1677c0a499c2SChen, Kenneth W * A page gathering list, protected by per file i_mmap_lock. The 1678c0a499c2SChen, Kenneth W * lock is used to avoid list corruption from multiple unmapping 1679c0a499c2SChen, Kenneth W * of the same page since we are using page->lru. 1680c0a499c2SChen, Kenneth W */ 1681fe1668aeSChen, Kenneth W LIST_HEAD(page_list); 168263551ae0SDavid Gibson 168363551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma)); 1684a5516438SAndi Kleen BUG_ON(start & ~huge_page_mask(h)); 1685a5516438SAndi Kleen BUG_ON(end & ~huge_page_mask(h)); 168663551ae0SDavid Gibson 1687cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_start(mm, start, end); 1688508034a3SHugh Dickins spin_lock(&mm->page_table_lock); 1689a5516438SAndi Kleen for (address = start; address < end; address += sz) { 1690c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address); 1691c7546f8fSDavid Gibson if (!ptep) 1692c7546f8fSDavid Gibson continue; 1693c7546f8fSDavid Gibson 169439dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 169539dde65cSChen, Kenneth W continue; 169639dde65cSChen, Kenneth W 169704f2cbe3SMel Gorman /* 169804f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific 169904f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we 170004f2cbe3SMel Gorman * are about to unmap is the actual page of interest. 
170104f2cbe3SMel Gorman */ 170204f2cbe3SMel Gorman if (ref_page) { 170304f2cbe3SMel Gorman pte = huge_ptep_get(ptep); 170404f2cbe3SMel Gorman if (huge_pte_none(pte)) 170504f2cbe3SMel Gorman continue; 170604f2cbe3SMel Gorman page = pte_page(pte); 170704f2cbe3SMel Gorman if (page != ref_page) 170804f2cbe3SMel Gorman continue; 170904f2cbe3SMel Gorman 171004f2cbe3SMel Gorman /* 171104f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that 171204f2cbe3SMel Gorman * future faults in this VMA will fail rather than 171304f2cbe3SMel Gorman * looking like data was lost 171404f2cbe3SMel Gorman */ 171504f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 171604f2cbe3SMel Gorman } 171704f2cbe3SMel Gorman 1718c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep); 17197f2e9525SGerald Schaefer if (huge_pte_none(pte)) 172063551ae0SDavid Gibson continue; 1721c7546f8fSDavid Gibson 172263551ae0SDavid Gibson page = pte_page(pte); 17236649a386SKen Chen if (pte_dirty(pte)) 17246649a386SKen Chen set_page_dirty(page); 1725fe1668aeSChen, Kenneth W list_add(&page->lru, &page_list); 172663551ae0SDavid Gibson } 17271da177e4SLinus Torvalds spin_unlock(&mm->page_table_lock); 1728508034a3SHugh Dickins flush_tlb_range(vma, start, end); 1729cddb8a5cSAndrea Arcangeli mmu_notifier_invalidate_range_end(mm, start, end); 1730fe1668aeSChen, Kenneth W list_for_each_entry_safe(page, tmp, &page_list, lru) { 1731fe1668aeSChen, Kenneth W list_del(&page->lru); 1732fe1668aeSChen, Kenneth W put_page(page); 1733fe1668aeSChen, Kenneth W } 17341da177e4SLinus Torvalds } 173563551ae0SDavid Gibson 1736502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 173704f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 1738502717f4SChen, Kenneth W { 1739502717f4SChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 174004f2cbe3SMel Gorman __unmap_hugepage_range(vma, start, end, ref_page); 1741502717f4SChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 1742502717f4SChen, Kenneth W } 1743502717f4SChen, Kenneth W 174404f2cbe3SMel Gorman /* 174504f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE 174604f2cbe3SMel Gorman * mapping it owns the reserve page for. The intention is to unmap the page 174704f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the 174804f2cbe3SMel Gorman * same region. 174904f2cbe3SMel Gorman */ 175004f2cbe3SMel Gorman int unmap_ref_private(struct mm_struct *mm, 175104f2cbe3SMel Gorman struct vm_area_struct *vma, 175204f2cbe3SMel Gorman struct page *page, 175304f2cbe3SMel Gorman unsigned long address) 175404f2cbe3SMel Gorman { 175504f2cbe3SMel Gorman struct vm_area_struct *iter_vma; 175604f2cbe3SMel Gorman struct address_space *mapping; 175704f2cbe3SMel Gorman struct prio_tree_iter iter; 175804f2cbe3SMel Gorman pgoff_t pgoff; 175904f2cbe3SMel Gorman 176004f2cbe3SMel Gorman /* 176104f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation 176204f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units.
176304f2cbe3SMel Gorman */ 176404f2cbe3SMel Gorman address = address & huge_page_mask(hstate_vma(vma)); 176504f2cbe3SMel Gorman pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) 176604f2cbe3SMel Gorman + (vma->vm_pgoff >> PAGE_SHIFT); 176704f2cbe3SMel Gorman mapping = (struct address_space *)page_private(page); 176804f2cbe3SMel Gorman 176904f2cbe3SMel Gorman vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 177004f2cbe3SMel Gorman /* Do not unmap the current VMA */ 177104f2cbe3SMel Gorman if (iter_vma == vma) 177204f2cbe3SMel Gorman continue; 177304f2cbe3SMel Gorman 177404f2cbe3SMel Gorman /* 177504f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves. 177604f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these 177704f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA 177804f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing 177904f2cbe3SMel Gorman * from the time of fork. This would look like data corruption 178004f2cbe3SMel Gorman */ 178104f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 178204f2cbe3SMel Gorman unmap_hugepage_range(iter_vma, 178304f2cbe3SMel Gorman address, address + HPAGE_SIZE, 178404f2cbe3SMel Gorman page); 178504f2cbe3SMel Gorman } 178604f2cbe3SMel Gorman 178704f2cbe3SMel Gorman return 1; 178804f2cbe3SMel Gorman } 178904f2cbe3SMel Gorman 17901e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 179104f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte, 179204f2cbe3SMel Gorman struct page *pagecache_page) 17931e8f889bSDavid Gibson { 1794a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 17951e8f889bSDavid Gibson struct page *old_page, *new_page; 179679ac6ba4SDavid Gibson int avoidcopy; 179704f2cbe3SMel Gorman int outside_reserve = 0; 17981e8f889bSDavid Gibson 17991e8f889bSDavid Gibson old_page = pte_page(pte); 18001e8f889bSDavid Gibson 180104f2cbe3SMel Gorman retry_avoidcopy: 18021e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy 18031e8f889bSDavid Gibson * and just make the page writable */ 18041e8f889bSDavid Gibson avoidcopy = (page_count(old_page) == 1); 18051e8f889bSDavid Gibson if (avoidcopy) { 18061e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep); 180783c54070SNick Piggin return 0; 18081e8f889bSDavid Gibson } 18091e8f889bSDavid Gibson 181004f2cbe3SMel Gorman /* 181104f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to 181204f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy 181304f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache 181404f2cbe3SMel Gorman * page is used to determine if the reserve at this address was 181504f2cbe3SMel Gorman * consumed or not. If reserves were used, a partial faulted mapping 181604f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead 181704f2cbe3SMel Gorman * of the full address range. 
181804f2cbe3SMel Gorman */ 181904f2cbe3SMel Gorman if (!(vma->vm_flags & VM_SHARED) && 182004f2cbe3SMel Gorman is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 182104f2cbe3SMel Gorman old_page != pagecache_page) 182204f2cbe3SMel Gorman outside_reserve = 1; 182304f2cbe3SMel Gorman 18241e8f889bSDavid Gibson page_cache_get(old_page); 182504f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve); 18261e8f889bSDavid Gibson 18272fc39cecSAdam Litke if (IS_ERR(new_page)) { 18281e8f889bSDavid Gibson page_cache_release(old_page); 182904f2cbe3SMel Gorman 183004f2cbe3SMel Gorman /* 183104f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW, 183204f2cbe3SMel Gorman * it is due to references held by a child and an insufficient 183304f2cbe3SMel Gorman * huge page pool. To guarantee the original mappers 183404f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child 183504f2cbe3SMel Gorman * may get SIGKILLed if it later faults. 183604f2cbe3SMel Gorman */ 183704f2cbe3SMel Gorman if (outside_reserve) { 183804f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 183904f2cbe3SMel Gorman if (unmap_ref_private(mm, vma, old_page, address)) { 184004f2cbe3SMel Gorman BUG_ON(page_count(old_page) != 1); 184104f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 184204f2cbe3SMel Gorman goto retry_avoidcopy; 184304f2cbe3SMel Gorman } 184404f2cbe3SMel Gorman WARN_ON_ONCE(1); 184504f2cbe3SMel Gorman } 184604f2cbe3SMel Gorman 18472fc39cecSAdam Litke return -PTR_ERR(new_page); 18481e8f889bSDavid Gibson } 18491e8f889bSDavid Gibson 18501e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 18519de455b2SAtsushi Nemoto copy_huge_page(new_page, old_page, address, vma); 18520ed361deSNick Piggin __SetPageUptodate(new_page); 18531e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 18541e8f889bSDavid Gibson 1855a5516438SAndi Kleen ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 18567f2e9525SGerald Schaefer if (likely(pte_same(huge_ptep_get(ptep), pte))) { 18571e8f889bSDavid Gibson /* Break COW */ 18588fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep); 18591e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, 18601e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1)); 18611e8f889bSDavid Gibson /* Make the old page be freed below */ 18621e8f889bSDavid Gibson new_page = old_page; 18631e8f889bSDavid Gibson } 18641e8f889bSDavid Gibson page_cache_release(new_page); 18651e8f889bSDavid Gibson page_cache_release(old_page); 186683c54070SNick Piggin return 0; 18671e8f889bSDavid Gibson } 18681e8f889bSDavid Gibson 186904f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */ 1870a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h, 1871a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 187204f2cbe3SMel Gorman { 187304f2cbe3SMel Gorman struct address_space *mapping; 1874e7c4b0bfSAndy Whitcroft pgoff_t idx; 187504f2cbe3SMel Gorman 187604f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 1877a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 187804f2cbe3SMel Gorman 187904f2cbe3SMel Gorman return find_lock_page(mapping, idx); 188004f2cbe3SMel Gorman } 188104f2cbe3SMel Gorman 1882a1ed3ddaSRobert P. J. 
Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 18831e8f889bSDavid Gibson unsigned long address, pte_t *ptep, int write_access) 1884ac9b9c66SHugh Dickins { 1885a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1886ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 1887e7c4b0bfSAndy Whitcroft pgoff_t idx; 18884c887265SAdam Litke unsigned long size; 18894c887265SAdam Litke struct page *page; 18904c887265SAdam Litke struct address_space *mapping; 18911e8f889bSDavid Gibson pte_t new_pte; 18924c887265SAdam Litke 189304f2cbe3SMel Gorman /* 189404f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the 189504f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 189604f2cbe3SMel Gorman * COW. Warn that such a situation has occurred as it may not be obvious 189704f2cbe3SMel Gorman */ 189804f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 189904f2cbe3SMel Gorman printk(KERN_WARNING 190004f2cbe3SMel Gorman "PID %d killed due to inadequate hugepage pool\n", 190104f2cbe3SMel Gorman current->pid); 190204f2cbe3SMel Gorman return ret; 190304f2cbe3SMel Gorman } 190404f2cbe3SMel Gorman 19054c887265SAdam Litke mapping = vma->vm_file->f_mapping; 1906a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 19074c887265SAdam Litke 19084c887265SAdam Litke /* 19094c887265SAdam Litke * Use page lock to guard against racing truncation 19104c887265SAdam Litke * before we get page_table_lock. 19114c887265SAdam Litke */ 19126bda666aSChristoph Lameter retry: 19136bda666aSChristoph Lameter page = find_lock_page(mapping, idx); 19146bda666aSChristoph Lameter if (!page) { 1915a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 1916ebed4bfcSHugh Dickins if (idx >= size) 1917ebed4bfcSHugh Dickins goto out; 191804f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0); 19192fc39cecSAdam Litke if (IS_ERR(page)) { 19202fc39cecSAdam Litke ret = -PTR_ERR(page); 19216bda666aSChristoph Lameter goto out; 19226bda666aSChristoph Lameter } 1923a5516438SAndi Kleen clear_huge_page(page, address, huge_page_size(h)); 19240ed361deSNick Piggin __SetPageUptodate(page); 1925ac9b9c66SHugh Dickins 19266bda666aSChristoph Lameter if (vma->vm_flags & VM_SHARED) { 19276bda666aSChristoph Lameter int err; 192845c682a6SKen Chen struct inode *inode = mapping->host; 19296bda666aSChristoph Lameter 19306bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 19316bda666aSChristoph Lameter if (err) { 19326bda666aSChristoph Lameter put_page(page); 19336bda666aSChristoph Lameter if (err == -EEXIST) 19346bda666aSChristoph Lameter goto retry; 19356bda666aSChristoph Lameter goto out; 19366bda666aSChristoph Lameter } 193745c682a6SKen Chen 193845c682a6SKen Chen spin_lock(&inode->i_lock); 1939a5516438SAndi Kleen inode->i_blocks += blocks_per_huge_page(h); 194045c682a6SKen Chen spin_unlock(&inode->i_lock); 19416bda666aSChristoph Lameter } else 19426bda666aSChristoph Lameter lock_page(page); 19436bda666aSChristoph Lameter } 19441e8f889bSDavid Gibson 194557303d80SAndy Whitcroft /* 194657303d80SAndy Whitcroft * If we are going to COW a private mapping later, we examine the 194757303d80SAndy Whitcroft * pending reservations for this page now. This will ensure that 194857303d80SAndy Whitcroft * any allocations necessary to record that reservation occur outside 194957303d80SAndy Whitcroft * the spinlock.
195057303d80SAndy Whitcroft */ 195157303d80SAndy Whitcroft if (write_access && !(vma->vm_flags & VM_SHARED)) 19522b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 19532b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 19542b26736cSAndy Whitcroft goto backout_unlocked; 19552b26736cSAndy Whitcroft } 195657303d80SAndy Whitcroft 1957ac9b9c66SHugh Dickins spin_lock(&mm->page_table_lock); 1958a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 19594c887265SAdam Litke if (idx >= size) 19604c887265SAdam Litke goto backout; 19614c887265SAdam Litke 196283c54070SNick Piggin ret = 0; 19637f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) 19644c887265SAdam Litke goto backout; 19654c887265SAdam Litke 19661e8f889bSDavid Gibson new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 19671e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED))); 19681e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte); 19691e8f889bSDavid Gibson 19701e8f889bSDavid Gibson if (write_access && !(vma->vm_flags & VM_SHARED)) { 19711e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */ 197204f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 19731e8f889bSDavid Gibson } 19741e8f889bSDavid Gibson 1975ac9b9c66SHugh Dickins spin_unlock(&mm->page_table_lock); 19764c887265SAdam Litke unlock_page(page); 19774c887265SAdam Litke out: 1978ac9b9c66SHugh Dickins return ret; 19794c887265SAdam Litke 19804c887265SAdam Litke backout: 19814c887265SAdam Litke spin_unlock(&mm->page_table_lock); 19822b26736cSAndy Whitcroft backout_unlocked: 19834c887265SAdam Litke unlock_page(page); 19844c887265SAdam Litke put_page(page); 19854c887265SAdam Litke goto out; 1986ac9b9c66SHugh Dickins } 1987ac9b9c66SHugh Dickins 198886e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 198986e5216fSAdam Litke unsigned long address, int write_access) 199086e5216fSAdam Litke { 199186e5216fSAdam Litke pte_t *ptep; 199286e5216fSAdam Litke pte_t entry; 19931e8f889bSDavid Gibson int ret; 199457303d80SAndy Whitcroft struct page *pagecache_page = NULL; 19953935baa9SDavid Gibson static DEFINE_MUTEX(hugetlb_instantiation_mutex); 1996a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 199786e5216fSAdam Litke 1998a5516438SAndi Kleen ptep = huge_pte_alloc(mm, address, huge_page_size(h)); 199986e5216fSAdam Litke if (!ptep) 200086e5216fSAdam Litke return VM_FAULT_OOM; 200186e5216fSAdam Litke 20023935baa9SDavid Gibson /* 20033935baa9SDavid Gibson * Serialize hugepage allocation and instantiation, so that we don't 20043935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate 20053935baa9SDavid Gibson * the same page in the page cache. 20063935baa9SDavid Gibson */ 20073935baa9SDavid Gibson mutex_lock(&hugetlb_instantiation_mutex); 20087f2e9525SGerald Schaefer entry = huge_ptep_get(ptep); 20097f2e9525SGerald Schaefer if (huge_pte_none(entry)) { 20103935baa9SDavid Gibson ret = hugetlb_no_page(mm, vma, address, ptep, write_access); 2011b4d1d99fSDavid Gibson goto out_mutex; 20123935baa9SDavid Gibson } 201386e5216fSAdam Litke 201483c54070SNick Piggin ret = 0; 20151e8f889bSDavid Gibson 201657303d80SAndy Whitcroft /* 201757303d80SAndy Whitcroft * If we are going to COW the mapping later, we examine the pending 201857303d80SAndy Whitcroft * reservations for this page now. 
This will ensure that any 201957303d80SAndy Whitcroft * allocations necessary to record that reservation occur outside the 202057303d80SAndy Whitcroft * spinlock. For private mappings, we also look up the pagecache 202157303d80SAndy Whitcroft * page now as it is used to determine if a reservation has been 202257303d80SAndy Whitcroft * consumed. 202357303d80SAndy Whitcroft */ 202457303d80SAndy Whitcroft if (write_access && !pte_write(entry)) { 20252b26736cSAndy Whitcroft if (vma_needs_reservation(h, vma, address) < 0) { 20262b26736cSAndy Whitcroft ret = VM_FAULT_OOM; 2027b4d1d99fSDavid Gibson goto out_mutex; 20282b26736cSAndy Whitcroft } 202957303d80SAndy Whitcroft 203057303d80SAndy Whitcroft if (!(vma->vm_flags & VM_SHARED)) 203157303d80SAndy Whitcroft pagecache_page = hugetlbfs_pagecache_page(h, 203257303d80SAndy Whitcroft vma, address); 203357303d80SAndy Whitcroft } 203457303d80SAndy Whitcroft 20351e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 20361e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */ 2037b4d1d99fSDavid Gibson if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 2038b4d1d99fSDavid Gibson goto out_page_table_lock; 2039b4d1d99fSDavid Gibson 2040b4d1d99fSDavid Gibson 2041b4d1d99fSDavid Gibson if (write_access) { 2042b4d1d99fSDavid Gibson if (!pte_write(entry)) { 204357303d80SAndy Whitcroft ret = hugetlb_cow(mm, vma, address, ptep, entry, 204457303d80SAndy Whitcroft pagecache_page); 2045b4d1d99fSDavid Gibson goto out_page_table_lock; 2046b4d1d99fSDavid Gibson } 2047b4d1d99fSDavid Gibson entry = pte_mkdirty(entry); 2048b4d1d99fSDavid Gibson } 2049b4d1d99fSDavid Gibson entry = pte_mkyoung(entry); 2050b4d1d99fSDavid Gibson if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access)) 2051b4d1d99fSDavid Gibson update_mmu_cache(vma, address, entry); 2052b4d1d99fSDavid Gibson 2053b4d1d99fSDavid Gibson out_page_table_lock: 20541e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 205557303d80SAndy Whitcroft 205657303d80SAndy Whitcroft if (pagecache_page) { 205757303d80SAndy Whitcroft unlock_page(pagecache_page); 205857303d80SAndy Whitcroft put_page(pagecache_page); 205957303d80SAndy Whitcroft } 206057303d80SAndy Whitcroft 2061b4d1d99fSDavid Gibson out_mutex: 20623935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 20631e8f889bSDavid Gibson 20641e8f889bSDavid Gibson return ret; 206586e5216fSAdam Litke } 206686e5216fSAdam Litke 2067ceb86879SAndi Kleen /* Can be overridden by architectures */ 2068ceb86879SAndi Kleen __attribute__((weak)) struct page * 2069ceb86879SAndi Kleen follow_huge_pud(struct mm_struct *mm, unsigned long address, 2070ceb86879SAndi Kleen pud_t *pud, int write) 2071ceb86879SAndi Kleen { 2072ceb86879SAndi Kleen BUG(); 2073ceb86879SAndi Kleen return NULL; 2074ceb86879SAndi Kleen } 2075ceb86879SAndi Kleen 207663551ae0SDavid Gibson int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 207763551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas, 20785b23dbe8SAdam Litke unsigned long *position, int *length, int i, 20795b23dbe8SAdam Litke int write) 208063551ae0SDavid Gibson { 2081d5d4b0aaSChen, Kenneth W unsigned long pfn_offset; 2082d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position; 208363551ae0SDavid Gibson int remainder = *length; 2084a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 208563551ae0SDavid Gibson 20861c59827dSHugh Dickins spin_lock(&mm->page_table_lock); 208763551ae0SDavid Gibson while (vaddr < vma->vm_end && remainder) { 208863551ae0SDavid Gibson
pte_t *pte; 208963551ae0SDavid Gibson struct page *page; 209063551ae0SDavid Gibson 20914c887265SAdam Litke /* 20924c887265SAdam Litke * Some archs (sparc64, sh*) have multiple pte_ts to 20934c887265SAdam Litke * each hugepage. We have to make * sure we get the 20944c887265SAdam Litke * first, for the page indexing below to work. 20954c887265SAdam Litke */ 2096a5516438SAndi Kleen pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); 209763551ae0SDavid Gibson 20987f2e9525SGerald Schaefer if (!pte || huge_pte_none(huge_ptep_get(pte)) || 20997f2e9525SGerald Schaefer (write && !pte_write(huge_ptep_get(pte)))) { 21004c887265SAdam Litke int ret; 21014c887265SAdam Litke 21024c887265SAdam Litke spin_unlock(&mm->page_table_lock); 21035b23dbe8SAdam Litke ret = hugetlb_fault(mm, vma, vaddr, write); 21044c887265SAdam Litke spin_lock(&mm->page_table_lock); 2105a89182c7SAdam Litke if (!(ret & VM_FAULT_ERROR)) 21064c887265SAdam Litke continue; 21074c887265SAdam Litke 21081c59827dSHugh Dickins remainder = 0; 21091c59827dSHugh Dickins if (!i) 21101c59827dSHugh Dickins i = -EFAULT; 21111c59827dSHugh Dickins break; 21121c59827dSHugh Dickins } 211363551ae0SDavid Gibson 2114a5516438SAndi Kleen pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 21157f2e9525SGerald Schaefer page = pte_page(huge_ptep_get(pte)); 2116d5d4b0aaSChen, Kenneth W same_page: 2117d6692183SChen, Kenneth W if (pages) { 211863551ae0SDavid Gibson get_page(page); 2119d5d4b0aaSChen, Kenneth W pages[i] = page + pfn_offset; 2120d6692183SChen, Kenneth W } 212163551ae0SDavid Gibson 212263551ae0SDavid Gibson if (vmas) 212363551ae0SDavid Gibson vmas[i] = vma; 212463551ae0SDavid Gibson 212563551ae0SDavid Gibson vaddr += PAGE_SIZE; 2126d5d4b0aaSChen, Kenneth W ++pfn_offset; 212763551ae0SDavid Gibson --remainder; 212863551ae0SDavid Gibson ++i; 2129d5d4b0aaSChen, Kenneth W if (vaddr < vma->vm_end && remainder && 2130a5516438SAndi Kleen pfn_offset < pages_per_huge_page(h)) { 2131d5d4b0aaSChen, Kenneth W /* 2132d5d4b0aaSChen, Kenneth W * We use pfn_offset to avoid touching the pageframes 2133d5d4b0aaSChen, Kenneth W * of this compound page. 
2134d5d4b0aaSChen, Kenneth W */ 2135d5d4b0aaSChen, Kenneth W goto same_page; 2136d5d4b0aaSChen, Kenneth W } 213763551ae0SDavid Gibson } 21381c59827dSHugh Dickins spin_unlock(&mm->page_table_lock); 213963551ae0SDavid Gibson *length = remainder; 214063551ae0SDavid Gibson *position = vaddr; 214163551ae0SDavid Gibson 214263551ae0SDavid Gibson return i; 214363551ae0SDavid Gibson } 21448f860591SZhang, Yanmin 21458f860591SZhang, Yanmin void hugetlb_change_protection(struct vm_area_struct *vma, 21468f860591SZhang, Yanmin unsigned long address, unsigned long end, pgprot_t newprot) 21478f860591SZhang, Yanmin { 21488f860591SZhang, Yanmin struct mm_struct *mm = vma->vm_mm; 21498f860591SZhang, Yanmin unsigned long start = address; 21508f860591SZhang, Yanmin pte_t *ptep; 21518f860591SZhang, Yanmin pte_t pte; 2152a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 21538f860591SZhang, Yanmin 21548f860591SZhang, Yanmin BUG_ON(address >= end); 21558f860591SZhang, Yanmin flush_cache_range(vma, address, end); 21568f860591SZhang, Yanmin 215739dde65cSChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 21588f860591SZhang, Yanmin spin_lock(&mm->page_table_lock); 2159a5516438SAndi Kleen for (; address < end; address += huge_page_size(h)) { 21608f860591SZhang, Yanmin ptep = huge_pte_offset(mm, address); 21618f860591SZhang, Yanmin if (!ptep) 21628f860591SZhang, Yanmin continue; 216339dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 216439dde65cSChen, Kenneth W continue; 21657f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) { 21668f860591SZhang, Yanmin pte = huge_ptep_get_and_clear(mm, address, ptep); 21678f860591SZhang, Yanmin pte = pte_mkhuge(pte_modify(pte, newprot)); 21688f860591SZhang, Yanmin set_huge_pte_at(mm, address, ptep, pte); 21698f860591SZhang, Yanmin } 21708f860591SZhang, Yanmin } 21718f860591SZhang, Yanmin spin_unlock(&mm->page_table_lock); 217239dde65cSChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 21738f860591SZhang, Yanmin 21748f860591SZhang, Yanmin flush_tlb_range(vma, start, end); 21758f860591SZhang, Yanmin } 21768f860591SZhang, Yanmin 2177a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode, 2178a1e78772SMel Gorman long from, long to, 2179a1e78772SMel Gorman struct vm_area_struct *vma) 2180e4e574b7SAdam Litke { 2181e4e574b7SAdam Litke long ret, chg; 2182a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 2183e4e574b7SAdam Litke 2184c37f9fb1SAndy Whitcroft if (vma && vma->vm_flags & VM_NORESERVE) 2185c37f9fb1SAndy Whitcroft return 0; 2186c37f9fb1SAndy Whitcroft 2187a1e78772SMel Gorman /* 2188a1e78772SMel Gorman * Shared mappings base their reservation on the number of pages that 2189a1e78772SMel Gorman * are already allocated on behalf of the file. Private mappings need 2190a1e78772SMel Gorman * to reserve the full area even if read-only as mprotect() may be 2191a1e78772SMel Gorman * called to make the mapping read-write. 
Assume !vma is a shm mapping 2192a1e78772SMel Gorman */ 2193a1e78772SMel Gorman if (!vma || vma->vm_flags & VM_SHARED) 2194e4e574b7SAdam Litke chg = region_chg(&inode->i_mapping->private_list, from, to); 2195a1e78772SMel Gorman else { 219684afd99bSAndy Whitcroft struct resv_map *resv_map = resv_map_alloc(); 219784afd99bSAndy Whitcroft if (!resv_map) 219884afd99bSAndy Whitcroft return -ENOMEM; 219984afd99bSAndy Whitcroft 2200a1e78772SMel Gorman chg = to - from; 220184afd99bSAndy Whitcroft 220284afd99bSAndy Whitcroft set_vma_resv_map(vma, resv_map); 220304f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 2204a1e78772SMel Gorman } 2205a1e78772SMel Gorman 2206e4e574b7SAdam Litke if (chg < 0) 2207e4e574b7SAdam Litke return chg; 22088a630112SKen Chen 220990d8b7e6SAdam Litke if (hugetlb_get_quota(inode->i_mapping, chg)) 221090d8b7e6SAdam Litke return -ENOSPC; 2211a5516438SAndi Kleen ret = hugetlb_acct_memory(h, chg); 221268842c9bSKen Chen if (ret < 0) { 221368842c9bSKen Chen hugetlb_put_quota(inode->i_mapping, chg); 2214a43a8c39SChen, Kenneth W return ret; 221568842c9bSKen Chen } 2216a1e78772SMel Gorman if (!vma || vma->vm_flags & VM_SHARED) 2217a43a8c39SChen, Kenneth W region_add(&inode->i_mapping->private_list, from, to); 2218a43a8c39SChen, Kenneth W return 0; 2219a43a8c39SChen, Kenneth W } 2220a43a8c39SChen, Kenneth W 2221a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 2222a43a8c39SChen, Kenneth W { 2223a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 2224a43a8c39SChen, Kenneth W long chg = region_truncate(&inode->i_mapping->private_list, offset); 222545c682a6SKen Chen 222645c682a6SKen Chen spin_lock(&inode->i_lock); 2227a5516438SAndi Kleen inode->i_blocks -= blocks_per_huge_page(h); 222845c682a6SKen Chen spin_unlock(&inode->i_lock); 222945c682a6SKen Chen 223090d8b7e6SAdam Litke hugetlb_put_quota(inode->i_mapping, (chg - freed)); 2231a5516438SAndi Kleen hugetlb_acct_memory(h, -(chg - freed)); 2232a43a8c39SChen, Kenneth W } 2233
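
A note on the boot-time interface handled by hugetlb_add_hstate(), hugetlb_nrpages_setup() and hugetlb_default_setup() above: each hugepages= count applies to the hstate registered by the most recent hugepagesz= (or to the default hstate if none has been seen yet), and pools whose order is >= MAX_ORDER are allocated from bootmem at parse time. On x86_64, for example, a command line such as "hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4 default_hugepagesz=2M" would set up both a 2 MB and a 1 GB pool; the sizes and counts here are purely illustrative.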
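
The same resize logic is reachable at run time: hugetlb_sysctl_handler() feeds a write to /proc/sys/vm/nr_hugepages into set_max_huge_pages() for the default hstate, and the file then reports how many persistent pages the kernel actually managed to keep. A minimal userspace sketch (the target of 64 pages is an arbitrary example):

#include <stdio.h>

int main(void)
{
	unsigned long nr = 0;
	FILE *f = fopen("/proc/sys/vm/nr_hugepages", "w");

	if (!f) {
		perror("nr_hugepages");
		return 1;
	}
	/* Ask for 64 persistent huge pages in the default pool. */
	fprintf(f, "64\n");
	fclose(f);

	/* Read back what set_max_huge_pages() could actually satisfy. */
	f = fopen("/proc/sys/vm/nr_hugepages", "r");
	if (f && fscanf(f, "%lu", &nr) == 1)
		printf("nr_hugepages is now %lu\n", nr);
	if (f)
		fclose(f);
	return 0;
}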
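
hugetlb_sysfs_init() above exports one directory per hstate under /sys/kernel/mm/hugepages, named after the "hugepages-%lukB" pattern. The sketch below tunes one hstate and reads back the derived read-only counters; the directory name hugepages-2048kB is an assumption that matches 2 MB pages on x86_64, and other page sizes get other names.

#include <stdio.h>

/* hugepages-2048kB is what h->name yields for 2 MB pages; adjust for other sizes. */
#define HSTATE_DIR "/sys/kernel/mm/hugepages/hugepages-2048kB"

static int write_attr(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", HSTATE_DIR, name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

static long read_attr(const char *name)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", HSTATE_DIR, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* Grow the static pool, then allow up to 16 surplus pages on top of it. */
	write_attr("nr_hugepages", "32");
	write_attr("nr_overcommit_hugepages", "16");

	printf("free:    %ld\n", read_attr("free_hugepages"));
	printf("surplus: %ld\n", read_attr("surplus_hugepages"));
	return 0;
}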
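
hugetlb_report_meminfo() and hugetlb_report_node_meminfo() define exactly the HugePages_* lines that show up in /proc/meminfo and in the per-node meminfo files. A small sketch that extracts the global counters:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("meminfo");
		return 1;
	}
	/* Print only the counters emitted by hugetlb_report_meminfo(). */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "HugePages_", 10) ||
		    !strncmp(line, "Hugepagesize", 12))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}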
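
Reservation and fault handling come together when a file on hugetlbfs is mapped: the mmap() path ends up in hugetlb_reserve_pages() for the range, and the first touch of each huge page then goes through hugetlb_fault()/hugetlb_no_page(). A sketch, assuming a hugetlbfs mount at /mnt/huge and 2 MB huge pages (both assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define LENGTH (2UL * 1024 * 1024)	/* one huge page, assuming a 2 MB size */

int main(void)
{
	/* Assumes a hugetlbfs mount at /mnt/huge. */
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
	void *addr;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* mmap() succeeds only if the reservation for the range can be made. */
	addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	/* The first write faults the huge page in via hugetlb_no_page(). */
	memset(addr, 0xab, LENGTH);

	munmap(addr, LENGTH);
	close(fd);
	unlink("/mnt/huge/example");
	return 0;
}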
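
The MAP_PRIVATE ownership rules described around hugetlb_cow() and unmap_ref_private() can be observed from userspace: the creating process owns the reserve, and after fork() a write needs an extra huge page that the reserve does not cover; when the pool cannot supply it, the comments above say it is a child rather than the owner that ends up SIGKILLed. A sketch under the same mount and page-size assumptions as before; whether the child survives depends entirely on pool availability.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define LENGTH (2UL * 1024 * 1024)	/* one huge page, assuming a 2 MB size */

int main(void)
{
	int fd = open("/mnt/huge/cow-example", O_CREAT | O_RDWR, 0600);	/* assumed mount */
	char *addr;
	pid_t pid;
	int status;

	if (fd < 0)
		return 1;
	addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (addr == MAP_FAILED)
		return 1;

	addr[0] = 1;	/* the parent instantiates the page, consuming its reserve */

	pid = fork();
	if (pid == 0) {
		/* The child's write forces a copy that is not backed by the
		 * parent's reserve; with an exhausted pool the child may be
		 * killed instead of getting its private copy. */
		addr[0] = 2;
		_exit(0);
	}
	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))
		printf("child killed by signal %d\n", WTERMSIG(status));
	else
		printf("child wrote its private copy\n");

	munmap(addr, LENGTH);
	close(fd);
	unlink("/mnt/huge/cow-example");
	return 0;
}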
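
hugetlb_change_protection() is what backs mprotect() on a hugetlb VMA, rewriting each huge PTE with the new protection under the page-table lock. A sketch that makes an initialised huge page read-only, under the same mount and size assumptions as above; the length passed to mprotect() is exactly one huge page, so it stays huge-page aligned.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define LENGTH (2UL * 1024 * 1024)	/* one huge page, assuming a 2 MB size */

int main(void)
{
	int fd = open("/mnt/huge/ro-example", O_CREAT | O_RDWR, 0600);	/* assumed mount */
	char *addr;

	if (fd < 0)
		return 1;
	addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return 1;
	memset(addr, 0, LENGTH);	/* fault the page in writably first */

	/* hugetlb_change_protection() installs the read-only huge PTE;
	 * any later store through addr would now fault. */
	if (mprotect(addr, LENGTH, PROT_READ) < 0)
		perror("mprotect");

	printf("first byte reads back as %d\n", addr[0]);
	munmap(addr, LENGTH);
	close(fd);
	unlink("/mnt/huge/ro-example");
	return 0;
}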