11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * Generic hugetlb support. 31da177e4SLinus Torvalds * (C) William Irwin, April 2004 41da177e4SLinus Torvalds */ 51da177e4SLinus Torvalds #include <linux/gfp.h> 61da177e4SLinus Torvalds #include <linux/list.h> 71da177e4SLinus Torvalds #include <linux/init.h> 81da177e4SLinus Torvalds #include <linux/module.h> 91da177e4SLinus Torvalds #include <linux/mm.h> 101da177e4SLinus Torvalds #include <linux/sysctl.h> 111da177e4SLinus Torvalds #include <linux/highmem.h> 121da177e4SLinus Torvalds #include <linux/nodemask.h> 1363551ae0SDavid Gibson #include <linux/pagemap.h> 145da7ca86SChristoph Lameter #include <linux/mempolicy.h> 15aea47ff3SChristoph Lameter #include <linux/cpuset.h> 163935baa9SDavid Gibson #include <linux/mutex.h> 175da7ca86SChristoph Lameter 1863551ae0SDavid Gibson #include <asm/page.h> 1963551ae0SDavid Gibson #include <asm/pgtable.h> 2063551ae0SDavid Gibson 2163551ae0SDavid Gibson #include <linux/hugetlb.h> 227835e98bSNick Piggin #include "internal.h" 231da177e4SLinus Torvalds 241da177e4SLinus Torvalds const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL; 25396faf03SMel Gorman static gfp_t htlb_alloc_mask = GFP_HIGHUSER; 26396faf03SMel Gorman unsigned long hugepages_treat_as_movable; 27a5516438SAndi Kleen 28e5ff2159SAndi Kleen static int max_hstate; 29e5ff2159SAndi Kleen unsigned int default_hstate_idx; 30e5ff2159SAndi Kleen struct hstate hstates[HUGE_MAX_HSTATE]; 31e5ff2159SAndi Kleen 32e5ff2159SAndi Kleen /* for command line parsing */ 33e5ff2159SAndi Kleen static struct hstate * __initdata parsed_hstate; 34e5ff2159SAndi Kleen static unsigned long __initdata default_hstate_max_huge_pages; 35e5ff2159SAndi Kleen 36e5ff2159SAndi Kleen #define for_each_hstate(h) \ 37e5ff2159SAndi Kleen for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++) 38396faf03SMel Gorman 393935baa9SDavid Gibson /* 403935baa9SDavid Gibson * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages 413935baa9SDavid Gibson */ 423935baa9SDavid Gibson static DEFINE_SPINLOCK(hugetlb_lock); 430bd0f9fbSEric Paris 44e7c4b0bfSAndy Whitcroft /* 4596822904SAndy Whitcroft * Region tracking -- allows tracking of reservations and instantiated pages 4696822904SAndy Whitcroft * across the pages in a mapping. 4784afd99bSAndy Whitcroft * 4884afd99bSAndy Whitcroft * The region data structures are protected by a combination of the mmap_sem 4984afd99bSAndy Whitcroft * and the hugetlb_instantion_mutex. To access or modify a region the caller 5084afd99bSAndy Whitcroft * must either hold the mmap_sem for write, or the mmap_sem for read and 5184afd99bSAndy Whitcroft * the hugetlb_instantiation mutex: 5284afd99bSAndy Whitcroft * 5384afd99bSAndy Whitcroft * down_write(&mm->mmap_sem); 5484afd99bSAndy Whitcroft * or 5584afd99bSAndy Whitcroft * down_read(&mm->mmap_sem); 5684afd99bSAndy Whitcroft * mutex_lock(&hugetlb_instantiation_mutex); 5796822904SAndy Whitcroft */ 5896822904SAndy Whitcroft struct file_region { 5996822904SAndy Whitcroft struct list_head link; 6096822904SAndy Whitcroft long from; 6196822904SAndy Whitcroft long to; 6296822904SAndy Whitcroft }; 6396822904SAndy Whitcroft 6496822904SAndy Whitcroft static long region_add(struct list_head *head, long f, long t) 6596822904SAndy Whitcroft { 6696822904SAndy Whitcroft struct file_region *rg, *nrg, *trg; 6796822904SAndy Whitcroft 6896822904SAndy Whitcroft /* Locate the region we are either in or before. 
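 * That is, find the first region with f <= rg->to.  region_add() never
 * allocates memory itself; callers are expected to have called region_chg()
 * beforehand so that a region already exists which can be extended to
 * cover [f, t).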
*/ 6996822904SAndy Whitcroft list_for_each_entry(rg, head, link) 7096822904SAndy Whitcroft if (f <= rg->to) 7196822904SAndy Whitcroft break; 7296822904SAndy Whitcroft 7396822904SAndy Whitcroft /* Round our left edge to the current segment if it encloses us. */ 7496822904SAndy Whitcroft if (f > rg->from) 7596822904SAndy Whitcroft f = rg->from; 7696822904SAndy Whitcroft 7796822904SAndy Whitcroft /* Check for and consume any regions we now overlap with. */ 7896822904SAndy Whitcroft nrg = rg; 7996822904SAndy Whitcroft list_for_each_entry_safe(rg, trg, rg->link.prev, link) { 8096822904SAndy Whitcroft if (&rg->link == head) 8196822904SAndy Whitcroft break; 8296822904SAndy Whitcroft if (rg->from > t) 8396822904SAndy Whitcroft break; 8496822904SAndy Whitcroft 8596822904SAndy Whitcroft /* If this area reaches higher then extend our area to 8696822904SAndy Whitcroft * include it completely. If this is not the first area 8796822904SAndy Whitcroft * which we intend to reuse, free it. */ 8896822904SAndy Whitcroft if (rg->to > t) 8996822904SAndy Whitcroft t = rg->to; 9096822904SAndy Whitcroft if (rg != nrg) { 9196822904SAndy Whitcroft list_del(&rg->link); 9296822904SAndy Whitcroft kfree(rg); 9396822904SAndy Whitcroft } 9496822904SAndy Whitcroft } 9596822904SAndy Whitcroft nrg->from = f; 9696822904SAndy Whitcroft nrg->to = t; 9796822904SAndy Whitcroft return 0; 9896822904SAndy Whitcroft } 9996822904SAndy Whitcroft 10096822904SAndy Whitcroft static long region_chg(struct list_head *head, long f, long t) 10196822904SAndy Whitcroft { 10296822904SAndy Whitcroft struct file_region *rg, *nrg; 10396822904SAndy Whitcroft long chg = 0; 10496822904SAndy Whitcroft 10596822904SAndy Whitcroft /* Locate the region we are before or in. */ 10696822904SAndy Whitcroft list_for_each_entry(rg, head, link) 10796822904SAndy Whitcroft if (f <= rg->to) 10896822904SAndy Whitcroft break; 10996822904SAndy Whitcroft 11096822904SAndy Whitcroft /* If we are below the current region then a new region is required. 11196822904SAndy Whitcroft * Subtle, allocate a new region at the position but make it zero 11296822904SAndy Whitcroft * size such that we can guarantee to record the reservation. */ 11396822904SAndy Whitcroft if (&rg->link == head || t < rg->from) { 11496822904SAndy Whitcroft nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); 11596822904SAndy Whitcroft if (!nrg) 11696822904SAndy Whitcroft return -ENOMEM; 11796822904SAndy Whitcroft nrg->from = f; 11896822904SAndy Whitcroft nrg->to = f; 11996822904SAndy Whitcroft INIT_LIST_HEAD(&nrg->link); 12096822904SAndy Whitcroft list_add(&nrg->link, rg->link.prev); 12196822904SAndy Whitcroft 12296822904SAndy Whitcroft return t - f; 12396822904SAndy Whitcroft } 12496822904SAndy Whitcroft 12596822904SAndy Whitcroft /* Round our left edge to the current segment if it encloses us. */ 12696822904SAndy Whitcroft if (f > rg->from) 12796822904SAndy Whitcroft f = rg->from; 12896822904SAndy Whitcroft chg = t - f; 12996822904SAndy Whitcroft 13096822904SAndy Whitcroft /* Check for and consume any regions we now overlap with. */ 13196822904SAndy Whitcroft list_for_each_entry(rg, rg->link.prev, link) { 13296822904SAndy Whitcroft if (&rg->link == head) 13396822904SAndy Whitcroft break; 13496822904SAndy Whitcroft if (rg->from > t) 13596822904SAndy Whitcroft return chg; 13696822904SAndy Whitcroft 13796822904SAndy Whitcroft /* We overlap with this area, if it extends futher than 13896822904SAndy Whitcroft * us then we must extend ourselves. Account for its 13996822904SAndy Whitcroft * existing reservation. 
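 * For example, with an existing region [2,4) and a request for [0,8),
 * chg starts at 8 and this loop subtracts the 2 pages already reserved,
 * so region_chg() reports 6 additional pages to reserve.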
*/ 14096822904SAndy Whitcroft if (rg->to > t) { 14196822904SAndy Whitcroft chg += rg->to - t; 14296822904SAndy Whitcroft t = rg->to; 14396822904SAndy Whitcroft } 14496822904SAndy Whitcroft chg -= rg->to - rg->from; 14596822904SAndy Whitcroft } 14696822904SAndy Whitcroft return chg; 14796822904SAndy Whitcroft } 14896822904SAndy Whitcroft 14996822904SAndy Whitcroft static long region_truncate(struct list_head *head, long end) 15096822904SAndy Whitcroft { 15196822904SAndy Whitcroft struct file_region *rg, *trg; 15296822904SAndy Whitcroft long chg = 0; 15396822904SAndy Whitcroft 15496822904SAndy Whitcroft /* Locate the region we are either in or before. */ 15596822904SAndy Whitcroft list_for_each_entry(rg, head, link) 15696822904SAndy Whitcroft if (end <= rg->to) 15796822904SAndy Whitcroft break; 15896822904SAndy Whitcroft if (&rg->link == head) 15996822904SAndy Whitcroft return 0; 16096822904SAndy Whitcroft 16196822904SAndy Whitcroft /* If we are in the middle of a region then adjust it. */ 16296822904SAndy Whitcroft if (end > rg->from) { 16396822904SAndy Whitcroft chg = rg->to - end; 16496822904SAndy Whitcroft rg->to = end; 16596822904SAndy Whitcroft rg = list_entry(rg->link.next, typeof(*rg), link); 16696822904SAndy Whitcroft } 16796822904SAndy Whitcroft 16896822904SAndy Whitcroft /* Drop any remaining regions. */ 16996822904SAndy Whitcroft list_for_each_entry_safe(rg, trg, rg->link.prev, link) { 17096822904SAndy Whitcroft if (&rg->link == head) 17196822904SAndy Whitcroft break; 17296822904SAndy Whitcroft chg += rg->to - rg->from; 17396822904SAndy Whitcroft list_del(&rg->link); 17496822904SAndy Whitcroft kfree(rg); 17596822904SAndy Whitcroft } 17696822904SAndy Whitcroft return chg; 17796822904SAndy Whitcroft } 17896822904SAndy Whitcroft 17984afd99bSAndy Whitcroft static long region_count(struct list_head *head, long f, long t) 18084afd99bSAndy Whitcroft { 18184afd99bSAndy Whitcroft struct file_region *rg; 18284afd99bSAndy Whitcroft long chg = 0; 18384afd99bSAndy Whitcroft 18484afd99bSAndy Whitcroft /* Locate each segment we overlap with, and count that overlap. */ 18584afd99bSAndy Whitcroft list_for_each_entry(rg, head, link) { 18684afd99bSAndy Whitcroft int seg_from; 18784afd99bSAndy Whitcroft int seg_to; 18884afd99bSAndy Whitcroft 18984afd99bSAndy Whitcroft if (rg->to <= f) 19084afd99bSAndy Whitcroft continue; 19184afd99bSAndy Whitcroft if (rg->from >= t) 19284afd99bSAndy Whitcroft break; 19384afd99bSAndy Whitcroft 19484afd99bSAndy Whitcroft seg_from = max(rg->from, f); 19584afd99bSAndy Whitcroft seg_to = min(rg->to, t); 19684afd99bSAndy Whitcroft 19784afd99bSAndy Whitcroft chg += seg_to - seg_from; 19884afd99bSAndy Whitcroft } 19984afd99bSAndy Whitcroft 20084afd99bSAndy Whitcroft return chg; 20184afd99bSAndy Whitcroft } 20284afd99bSAndy Whitcroft 20396822904SAndy Whitcroft /* 204e7c4b0bfSAndy Whitcroft * Convert the address within this vma to the page offset within 205e7c4b0bfSAndy Whitcroft * the mapping, in pagecache page units; huge pages here. 206e7c4b0bfSAndy Whitcroft */ 207a5516438SAndi Kleen static pgoff_t vma_hugecache_offset(struct hstate *h, 208a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 209e7c4b0bfSAndy Whitcroft { 210a5516438SAndi Kleen return ((address - vma->vm_start) >> huge_page_shift(h)) + 211a5516438SAndi Kleen (vma->vm_pgoff >> huge_page_order(h)); 212e7c4b0bfSAndy Whitcroft } 213e7c4b0bfSAndy Whitcroft 21484afd99bSAndy Whitcroft /* 21584afd99bSAndy Whitcroft * Flags for MAP_PRIVATE reservations. 
These are stored in the bottom 21684afd99bSAndy Whitcroft * bits of the reservation map pointer, which are always clear due to 21784afd99bSAndy Whitcroft * alignment. 21884afd99bSAndy Whitcroft */ 21984afd99bSAndy Whitcroft #define HPAGE_RESV_OWNER (1UL << 0) 22084afd99bSAndy Whitcroft #define HPAGE_RESV_UNMAPPED (1UL << 1) 22104f2cbe3SMel Gorman #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED) 22284afd99bSAndy Whitcroft 223a1e78772SMel Gorman /* 224a1e78772SMel Gorman * These helpers are used to track how many pages are reserved for 225a1e78772SMel Gorman * faults in a MAP_PRIVATE mapping. Only the process that called mmap() 226a1e78772SMel Gorman * is guaranteed to have their future faults succeed. 227a1e78772SMel Gorman * 228a1e78772SMel Gorman * With the exception of reset_vma_resv_huge_pages() which is called at fork(), 229a1e78772SMel Gorman * the reserve counters are updated with the hugetlb_lock held. It is safe 230a1e78772SMel Gorman * to reset the VMA at fork() time as it is not in use yet and there is no 231a1e78772SMel Gorman * chance of the global counters getting corrupted as a result of the values. 23284afd99bSAndy Whitcroft * 23384afd99bSAndy Whitcroft * The private mapping reservation is represented in a subtly different 23484afd99bSAndy Whitcroft * manner to a shared mapping. A shared mapping has a region map associated 23584afd99bSAndy Whitcroft * with the underlying file, this region map represents the backing file 23684afd99bSAndy Whitcroft * pages which have ever had a reservation assigned which this persists even 23784afd99bSAndy Whitcroft * after the page is instantiated. A private mapping has a region map 23884afd99bSAndy Whitcroft * associated with the original mmap which is attached to all VMAs which 23984afd99bSAndy Whitcroft * reference it, this region map represents those offsets which have consumed 24084afd99bSAndy Whitcroft * reservation ie. where pages have been instantiated. 
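 * Offsets recorded in a private map have already consumed their reserve,
 * so at unmap time (end - start) - region_count() is the number of
 * reserved pages that were never faulted in and must be released again
 * (see hugetlb_vm_op_close() below).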
241a1e78772SMel Gorman */ 242e7c4b0bfSAndy Whitcroft static unsigned long get_vma_private_data(struct vm_area_struct *vma) 243e7c4b0bfSAndy Whitcroft { 244e7c4b0bfSAndy Whitcroft return (unsigned long)vma->vm_private_data; 245e7c4b0bfSAndy Whitcroft } 246e7c4b0bfSAndy Whitcroft 247e7c4b0bfSAndy Whitcroft static void set_vma_private_data(struct vm_area_struct *vma, 248e7c4b0bfSAndy Whitcroft unsigned long value) 249e7c4b0bfSAndy Whitcroft { 250e7c4b0bfSAndy Whitcroft vma->vm_private_data = (void *)value; 251e7c4b0bfSAndy Whitcroft } 252e7c4b0bfSAndy Whitcroft 25384afd99bSAndy Whitcroft struct resv_map { 25484afd99bSAndy Whitcroft struct kref refs; 25584afd99bSAndy Whitcroft struct list_head regions; 25684afd99bSAndy Whitcroft }; 25784afd99bSAndy Whitcroft 25884afd99bSAndy Whitcroft struct resv_map *resv_map_alloc(void) 25984afd99bSAndy Whitcroft { 26084afd99bSAndy Whitcroft struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); 26184afd99bSAndy Whitcroft if (!resv_map) 26284afd99bSAndy Whitcroft return NULL; 26384afd99bSAndy Whitcroft 26484afd99bSAndy Whitcroft kref_init(&resv_map->refs); 26584afd99bSAndy Whitcroft INIT_LIST_HEAD(&resv_map->regions); 26684afd99bSAndy Whitcroft 26784afd99bSAndy Whitcroft return resv_map; 26884afd99bSAndy Whitcroft } 26984afd99bSAndy Whitcroft 27084afd99bSAndy Whitcroft void resv_map_release(struct kref *ref) 27184afd99bSAndy Whitcroft { 27284afd99bSAndy Whitcroft struct resv_map *resv_map = container_of(ref, struct resv_map, refs); 27384afd99bSAndy Whitcroft 27484afd99bSAndy Whitcroft /* Clear out any active regions before we release the map. */ 27584afd99bSAndy Whitcroft region_truncate(&resv_map->regions, 0); 27684afd99bSAndy Whitcroft kfree(resv_map); 27784afd99bSAndy Whitcroft } 27884afd99bSAndy Whitcroft 27984afd99bSAndy Whitcroft static struct resv_map *vma_resv_map(struct vm_area_struct *vma) 280a1e78772SMel Gorman { 281a1e78772SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 282a1e78772SMel Gorman if (!(vma->vm_flags & VM_SHARED)) 28384afd99bSAndy Whitcroft return (struct resv_map *)(get_vma_private_data(vma) & 28484afd99bSAndy Whitcroft ~HPAGE_RESV_MASK); 285a1e78772SMel Gorman return 0; 286a1e78772SMel Gorman } 287a1e78772SMel Gorman 28884afd99bSAndy Whitcroft static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) 289a1e78772SMel Gorman { 290a1e78772SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 291a1e78772SMel Gorman VM_BUG_ON(vma->vm_flags & VM_SHARED); 292a1e78772SMel Gorman 29384afd99bSAndy Whitcroft set_vma_private_data(vma, (get_vma_private_data(vma) & 29484afd99bSAndy Whitcroft HPAGE_RESV_MASK) | (unsigned long)map); 29504f2cbe3SMel Gorman } 29604f2cbe3SMel Gorman 29704f2cbe3SMel Gorman static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) 29804f2cbe3SMel Gorman { 29904f2cbe3SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 300e7c4b0bfSAndy Whitcroft VM_BUG_ON(vma->vm_flags & VM_SHARED); 301e7c4b0bfSAndy Whitcroft 302e7c4b0bfSAndy Whitcroft set_vma_private_data(vma, get_vma_private_data(vma) | flags); 30304f2cbe3SMel Gorman } 30404f2cbe3SMel Gorman 30504f2cbe3SMel Gorman static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) 30604f2cbe3SMel Gorman { 30704f2cbe3SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 308e7c4b0bfSAndy Whitcroft 309e7c4b0bfSAndy Whitcroft return (get_vma_private_data(vma) & flag) != 0; 310a1e78772SMel Gorman } 311a1e78772SMel Gorman 312a1e78772SMel Gorman /* Decrement the reserved pages in the hugepage pool by one */ 313a5516438SAndi 
Kleen static void decrement_hugepage_resv_vma(struct hstate *h, 314a5516438SAndi Kleen struct vm_area_struct *vma) 315a1e78772SMel Gorman { 316c37f9fb1SAndy Whitcroft if (vma->vm_flags & VM_NORESERVE) 317c37f9fb1SAndy Whitcroft return; 318c37f9fb1SAndy Whitcroft 319a1e78772SMel Gorman if (vma->vm_flags & VM_SHARED) { 320a1e78772SMel Gorman /* Shared mappings always use reserves */ 321a5516438SAndi Kleen h->resv_huge_pages--; 32284afd99bSAndy Whitcroft } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 323a1e78772SMel Gorman /* 324a1e78772SMel Gorman * Only the process that called mmap() has reserves for 325a1e78772SMel Gorman * private mappings. 326a1e78772SMel Gorman */ 327a5516438SAndi Kleen h->resv_huge_pages--; 328a1e78772SMel Gorman } 329a1e78772SMel Gorman } 330a1e78772SMel Gorman 33104f2cbe3SMel Gorman /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ 332a1e78772SMel Gorman void reset_vma_resv_huge_pages(struct vm_area_struct *vma) 333a1e78772SMel Gorman { 334a1e78772SMel Gorman VM_BUG_ON(!is_vm_hugetlb_page(vma)); 335a1e78772SMel Gorman if (!(vma->vm_flags & VM_SHARED)) 336a1e78772SMel Gorman vma->vm_private_data = (void *)0; 337a1e78772SMel Gorman } 338a1e78772SMel Gorman 339a1e78772SMel Gorman /* Returns true if the VMA has associated reserve pages */ 340a1e78772SMel Gorman static int vma_has_private_reserves(struct vm_area_struct *vma) 341a1e78772SMel Gorman { 342a1e78772SMel Gorman if (vma->vm_flags & VM_SHARED) 343a1e78772SMel Gorman return 0; 34484afd99bSAndy Whitcroft if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 345a1e78772SMel Gorman return 0; 346a1e78772SMel Gorman return 1; 347a1e78772SMel Gorman } 348a1e78772SMel Gorman 349a5516438SAndi Kleen static void clear_huge_page(struct page *page, 350a5516438SAndi Kleen unsigned long addr, unsigned long sz) 35179ac6ba4SDavid Gibson { 35279ac6ba4SDavid Gibson int i; 35379ac6ba4SDavid Gibson 35479ac6ba4SDavid Gibson might_sleep(); 355a5516438SAndi Kleen for (i = 0; i < sz/PAGE_SIZE; i++) { 35679ac6ba4SDavid Gibson cond_resched(); 357281e0e3bSRalf Baechle clear_user_highpage(page + i, addr + i * PAGE_SIZE); 35879ac6ba4SDavid Gibson } 35979ac6ba4SDavid Gibson } 36079ac6ba4SDavid Gibson 36179ac6ba4SDavid Gibson static void copy_huge_page(struct page *dst, struct page *src, 3629de455b2SAtsushi Nemoto unsigned long addr, struct vm_area_struct *vma) 36379ac6ba4SDavid Gibson { 36479ac6ba4SDavid Gibson int i; 365a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 36679ac6ba4SDavid Gibson 36779ac6ba4SDavid Gibson might_sleep(); 368a5516438SAndi Kleen for (i = 0; i < pages_per_huge_page(h); i++) { 36979ac6ba4SDavid Gibson cond_resched(); 3709de455b2SAtsushi Nemoto copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); 37179ac6ba4SDavid Gibson } 37279ac6ba4SDavid Gibson } 37379ac6ba4SDavid Gibson 374a5516438SAndi Kleen static void enqueue_huge_page(struct hstate *h, struct page *page) 3751da177e4SLinus Torvalds { 3761da177e4SLinus Torvalds int nid = page_to_nid(page); 377a5516438SAndi Kleen list_add(&page->lru, &h->hugepage_freelists[nid]); 378a5516438SAndi Kleen h->free_huge_pages++; 379a5516438SAndi Kleen h->free_huge_pages_node[nid]++; 3801da177e4SLinus Torvalds } 3811da177e4SLinus Torvalds 382a5516438SAndi Kleen static struct page *dequeue_huge_page(struct hstate *h) 383348e1e04SNishanth Aravamudan { 384348e1e04SNishanth Aravamudan int nid; 385348e1e04SNishanth Aravamudan struct page *page = NULL; 386348e1e04SNishanth Aravamudan 387348e1e04SNishanth Aravamudan for (nid = 0; nid < MAX_NUMNODES; ++nid) { 
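		/*
		 * Take the first free huge page found, walking nodes in
		 * ascending node id order; this path is used when shrinking
		 * the pool, where NUMA placement does not matter.
		 */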
388a5516438SAndi Kleen if (!list_empty(&h->hugepage_freelists[nid])) { 389a5516438SAndi Kleen page = list_entry(h->hugepage_freelists[nid].next, 390348e1e04SNishanth Aravamudan struct page, lru); 391348e1e04SNishanth Aravamudan list_del(&page->lru); 392a5516438SAndi Kleen h->free_huge_pages--; 393a5516438SAndi Kleen h->free_huge_pages_node[nid]--; 394348e1e04SNishanth Aravamudan break; 395348e1e04SNishanth Aravamudan } 396348e1e04SNishanth Aravamudan } 397348e1e04SNishanth Aravamudan return page; 398348e1e04SNishanth Aravamudan } 399348e1e04SNishanth Aravamudan 400a5516438SAndi Kleen static struct page *dequeue_huge_page_vma(struct hstate *h, 401a5516438SAndi Kleen struct vm_area_struct *vma, 40204f2cbe3SMel Gorman unsigned long address, int avoid_reserve) 4031da177e4SLinus Torvalds { 40431a5c6e4SNishanth Aravamudan int nid; 4051da177e4SLinus Torvalds struct page *page = NULL; 406480eccf9SLee Schermerhorn struct mempolicy *mpol; 40719770b32SMel Gorman nodemask_t *nodemask; 408396faf03SMel Gorman struct zonelist *zonelist = huge_zonelist(vma, address, 40919770b32SMel Gorman htlb_alloc_mask, &mpol, &nodemask); 410dd1a239fSMel Gorman struct zone *zone; 411dd1a239fSMel Gorman struct zoneref *z; 4121da177e4SLinus Torvalds 413a1e78772SMel Gorman /* 414a1e78772SMel Gorman * A child process with MAP_PRIVATE mappings created by their parent 415a1e78772SMel Gorman * have no page reserves. This check ensures that reservations are 416a1e78772SMel Gorman * not "stolen". The child may still get SIGKILLed 417a1e78772SMel Gorman */ 418a1e78772SMel Gorman if (!vma_has_private_reserves(vma) && 419a5516438SAndi Kleen h->free_huge_pages - h->resv_huge_pages == 0) 420a1e78772SMel Gorman return NULL; 421a1e78772SMel Gorman 42204f2cbe3SMel Gorman /* If reserves cannot be used, ensure enough pages are in the pool */ 423a5516438SAndi Kleen if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) 42404f2cbe3SMel Gorman return NULL; 42504f2cbe3SMel Gorman 42619770b32SMel Gorman for_each_zone_zonelist_nodemask(zone, z, zonelist, 42719770b32SMel Gorman MAX_NR_ZONES - 1, nodemask) { 42854a6eb5cSMel Gorman nid = zone_to_nid(zone); 42954a6eb5cSMel Gorman if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) && 430a5516438SAndi Kleen !list_empty(&h->hugepage_freelists[nid])) { 431a5516438SAndi Kleen page = list_entry(h->hugepage_freelists[nid].next, 4321da177e4SLinus Torvalds struct page, lru); 4331da177e4SLinus Torvalds list_del(&page->lru); 434a5516438SAndi Kleen h->free_huge_pages--; 435a5516438SAndi Kleen h->free_huge_pages_node[nid]--; 43604f2cbe3SMel Gorman 43704f2cbe3SMel Gorman if (!avoid_reserve) 438a5516438SAndi Kleen decrement_hugepage_resv_vma(h, vma); 439a1e78772SMel Gorman 4405ab3ee7bSKen Chen break; 4411da177e4SLinus Torvalds } 4423abf7afdSAndrew Morton } 44352cd3b07SLee Schermerhorn mpol_cond_put(mpol); 4441da177e4SLinus Torvalds return page; 4451da177e4SLinus Torvalds } 4461da177e4SLinus Torvalds 447a5516438SAndi Kleen static void update_and_free_page(struct hstate *h, struct page *page) 4486af2acb6SAdam Litke { 4496af2acb6SAdam Litke int i; 450a5516438SAndi Kleen 451a5516438SAndi Kleen h->nr_huge_pages--; 452a5516438SAndi Kleen h->nr_huge_pages_node[page_to_nid(page)]--; 453a5516438SAndi Kleen for (i = 0; i < pages_per_huge_page(h); i++) { 4546af2acb6SAdam Litke page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | 4556af2acb6SAdam Litke 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | 4566af2acb6SAdam Litke 1 << PG_private | 1<< PG_writeback); 4576af2acb6SAdam Litke } 
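	/*
	 * With the hugetlb-specific state cleared, detach the compound
	 * destructor and restore a normal reference count so the buddy
	 * allocator will accept the pages back.
	 */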
4586af2acb6SAdam Litke set_compound_page_dtor(page, NULL); 4596af2acb6SAdam Litke set_page_refcounted(page); 4607f2e9525SGerald Schaefer arch_release_hugepage(page); 461a5516438SAndi Kleen __free_pages(page, huge_page_order(h)); 4626af2acb6SAdam Litke } 4636af2acb6SAdam Litke 464e5ff2159SAndi Kleen struct hstate *size_to_hstate(unsigned long size) 465e5ff2159SAndi Kleen { 466e5ff2159SAndi Kleen struct hstate *h; 467e5ff2159SAndi Kleen 468e5ff2159SAndi Kleen for_each_hstate(h) { 469e5ff2159SAndi Kleen if (huge_page_size(h) == size) 470e5ff2159SAndi Kleen return h; 471e5ff2159SAndi Kleen } 472e5ff2159SAndi Kleen return NULL; 473e5ff2159SAndi Kleen } 474e5ff2159SAndi Kleen 47527a85ef1SDavid Gibson static void free_huge_page(struct page *page) 47627a85ef1SDavid Gibson { 477a5516438SAndi Kleen /* 478a5516438SAndi Kleen * Can't pass hstate in here because it is called from the 479a5516438SAndi Kleen * compound page destructor. 480a5516438SAndi Kleen */ 481e5ff2159SAndi Kleen struct hstate *h = page_hstate(page); 4827893d1d5SAdam Litke int nid = page_to_nid(page); 483c79fb75eSAdam Litke struct address_space *mapping; 48427a85ef1SDavid Gibson 485c79fb75eSAdam Litke mapping = (struct address_space *) page_private(page); 486e5df70abSAndy Whitcroft set_page_private(page, 0); 4877893d1d5SAdam Litke BUG_ON(page_count(page)); 48827a85ef1SDavid Gibson INIT_LIST_HEAD(&page->lru); 48927a85ef1SDavid Gibson 49027a85ef1SDavid Gibson spin_lock(&hugetlb_lock); 491a5516438SAndi Kleen if (h->surplus_huge_pages_node[nid]) { 492a5516438SAndi Kleen update_and_free_page(h, page); 493a5516438SAndi Kleen h->surplus_huge_pages--; 494a5516438SAndi Kleen h->surplus_huge_pages_node[nid]--; 4957893d1d5SAdam Litke } else { 496a5516438SAndi Kleen enqueue_huge_page(h, page); 4977893d1d5SAdam Litke } 49827a85ef1SDavid Gibson spin_unlock(&hugetlb_lock); 499c79fb75eSAdam Litke if (mapping) 5009a119c05SAdam Litke hugetlb_put_quota(mapping, 1); 50127a85ef1SDavid Gibson } 50227a85ef1SDavid Gibson 5037893d1d5SAdam Litke /* 5047893d1d5SAdam Litke * Increment or decrement surplus_huge_pages. Keep node-specific counters 5057893d1d5SAdam Litke * balanced by operating on them in a round-robin fashion. 5067893d1d5SAdam Litke * Returns 1 if an adjustment was made. 
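 * delta == -1 turns one surplus page back into a persistent pool page
 * (used when growing the pool), delta == 1 marks one pool page as surplus
 * (used when shrinking the pool); see set_max_huge_pages() below.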
5077893d1d5SAdam Litke */ 508a5516438SAndi Kleen static int adjust_pool_surplus(struct hstate *h, int delta) 5097893d1d5SAdam Litke { 5107893d1d5SAdam Litke static int prev_nid; 5117893d1d5SAdam Litke int nid = prev_nid; 5127893d1d5SAdam Litke int ret = 0; 5137893d1d5SAdam Litke 5147893d1d5SAdam Litke VM_BUG_ON(delta != -1 && delta != 1); 5157893d1d5SAdam Litke do { 5167893d1d5SAdam Litke nid = next_node(nid, node_online_map); 5177893d1d5SAdam Litke if (nid == MAX_NUMNODES) 5187893d1d5SAdam Litke nid = first_node(node_online_map); 5197893d1d5SAdam Litke 5207893d1d5SAdam Litke /* To shrink on this node, there must be a surplus page */ 521a5516438SAndi Kleen if (delta < 0 && !h->surplus_huge_pages_node[nid]) 5227893d1d5SAdam Litke continue; 5237893d1d5SAdam Litke /* Surplus cannot exceed the total number of pages */ 524a5516438SAndi Kleen if (delta > 0 && h->surplus_huge_pages_node[nid] >= 525a5516438SAndi Kleen h->nr_huge_pages_node[nid]) 5267893d1d5SAdam Litke continue; 5277893d1d5SAdam Litke 528a5516438SAndi Kleen h->surplus_huge_pages += delta; 529a5516438SAndi Kleen h->surplus_huge_pages_node[nid] += delta; 5307893d1d5SAdam Litke ret = 1; 5317893d1d5SAdam Litke break; 5327893d1d5SAdam Litke } while (nid != prev_nid); 5337893d1d5SAdam Litke 5347893d1d5SAdam Litke prev_nid = nid; 5357893d1d5SAdam Litke return ret; 5367893d1d5SAdam Litke } 5377893d1d5SAdam Litke 538a5516438SAndi Kleen static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) 539b7ba30c6SAndi Kleen { 540b7ba30c6SAndi Kleen set_compound_page_dtor(page, free_huge_page); 541b7ba30c6SAndi Kleen spin_lock(&hugetlb_lock); 542a5516438SAndi Kleen h->nr_huge_pages++; 543a5516438SAndi Kleen h->nr_huge_pages_node[nid]++; 544b7ba30c6SAndi Kleen spin_unlock(&hugetlb_lock); 545b7ba30c6SAndi Kleen put_page(page); /* free it into the hugepage allocator */ 546b7ba30c6SAndi Kleen } 547b7ba30c6SAndi Kleen 548a5516438SAndi Kleen static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) 5491da177e4SLinus Torvalds { 5501da177e4SLinus Torvalds struct page *page; 551f96efd58SJoe Jin 55263b4613cSNishanth Aravamudan page = alloc_pages_node(nid, 553551883aeSNishanth Aravamudan htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| 554551883aeSNishanth Aravamudan __GFP_REPEAT|__GFP_NOWARN, 555a5516438SAndi Kleen huge_page_order(h)); 5561da177e4SLinus Torvalds if (page) { 5577f2e9525SGerald Schaefer if (arch_prepare_hugepage(page)) { 5587f2e9525SGerald Schaefer __free_pages(page, HUGETLB_PAGE_ORDER); 5597b8ee84dSHarvey Harrison return NULL; 5607f2e9525SGerald Schaefer } 561a5516438SAndi Kleen prep_new_huge_page(h, page, nid); 5621da177e4SLinus Torvalds } 56363b4613cSNishanth Aravamudan 56463b4613cSNishanth Aravamudan return page; 56563b4613cSNishanth Aravamudan } 56663b4613cSNishanth Aravamudan 567a5516438SAndi Kleen static int alloc_fresh_huge_page(struct hstate *h) 56863b4613cSNishanth Aravamudan { 56963b4613cSNishanth Aravamudan struct page *page; 57063b4613cSNishanth Aravamudan int start_nid; 57163b4613cSNishanth Aravamudan int next_nid; 57263b4613cSNishanth Aravamudan int ret = 0; 57363b4613cSNishanth Aravamudan 574a5516438SAndi Kleen start_nid = h->hugetlb_next_nid; 57563b4613cSNishanth Aravamudan 57663b4613cSNishanth Aravamudan do { 577a5516438SAndi Kleen page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid); 57863b4613cSNishanth Aravamudan if (page) 57963b4613cSNishanth Aravamudan ret = 1; 58063b4613cSNishanth Aravamudan /* 58163b4613cSNishanth Aravamudan * Use a helper variable to find the next node and then 
58263b4613cSNishanth Aravamudan * copy it back to hugetlb_next_nid afterwards: 58363b4613cSNishanth Aravamudan * otherwise there's a window in which a racer might 58463b4613cSNishanth Aravamudan * pass invalid nid MAX_NUMNODES to alloc_pages_node. 58563b4613cSNishanth Aravamudan * But we don't need to use a spin_lock here: it really 58663b4613cSNishanth Aravamudan * doesn't matter if occasionally a racer chooses the 58763b4613cSNishanth Aravamudan * same nid as we do. Move nid forward in the mask even 58863b4613cSNishanth Aravamudan * if we just successfully allocated a hugepage so that 58963b4613cSNishanth Aravamudan * the next caller gets hugepages on the next node. 59063b4613cSNishanth Aravamudan */ 591a5516438SAndi Kleen next_nid = next_node(h->hugetlb_next_nid, node_online_map); 59263b4613cSNishanth Aravamudan if (next_nid == MAX_NUMNODES) 59363b4613cSNishanth Aravamudan next_nid = first_node(node_online_map); 594a5516438SAndi Kleen h->hugetlb_next_nid = next_nid; 595a5516438SAndi Kleen } while (!page && h->hugetlb_next_nid != start_nid); 59663b4613cSNishanth Aravamudan 5973b116300SAdam Litke if (ret) 5983b116300SAdam Litke count_vm_event(HTLB_BUDDY_PGALLOC); 5993b116300SAdam Litke else 6003b116300SAdam Litke count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 6013b116300SAdam Litke 60263b4613cSNishanth Aravamudan return ret; 6031da177e4SLinus Torvalds } 6041da177e4SLinus Torvalds 605a5516438SAndi Kleen static struct page *alloc_buddy_huge_page(struct hstate *h, 606a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 6077893d1d5SAdam Litke { 6087893d1d5SAdam Litke struct page *page; 609d1c3fb1fSNishanth Aravamudan unsigned int nid; 6107893d1d5SAdam Litke 611d1c3fb1fSNishanth Aravamudan /* 612d1c3fb1fSNishanth Aravamudan * Assume we will successfully allocate the surplus page to 613d1c3fb1fSNishanth Aravamudan * prevent racing processes from causing the surplus to exceed 614d1c3fb1fSNishanth Aravamudan * overcommit 615d1c3fb1fSNishanth Aravamudan * 616d1c3fb1fSNishanth Aravamudan * This however introduces a different race, where a process B 617d1c3fb1fSNishanth Aravamudan * tries to grow the static hugepage pool while alloc_pages() is 618d1c3fb1fSNishanth Aravamudan * called by process A. B will only examine the per-node 619d1c3fb1fSNishanth Aravamudan * counters in determining if surplus huge pages can be 620d1c3fb1fSNishanth Aravamudan * converted to normal huge pages in adjust_pool_surplus(). A 621d1c3fb1fSNishanth Aravamudan * won't be able to increment the per-node counter, until the 622d1c3fb1fSNishanth Aravamudan * lock is dropped by B, but B doesn't drop hugetlb_lock until 623d1c3fb1fSNishanth Aravamudan * no more huge pages can be converted from surplus to normal 624d1c3fb1fSNishanth Aravamudan * state (and doesn't try to convert again). Thus, we have a 625d1c3fb1fSNishanth Aravamudan * case where a surplus huge page exists, the pool is grown, and 626d1c3fb1fSNishanth Aravamudan * the surplus huge page still exists after, even though it 627d1c3fb1fSNishanth Aravamudan * should just have been converted to a normal huge page. This 628d1c3fb1fSNishanth Aravamudan * does not leak memory, though, as the hugepage will be freed 629d1c3fb1fSNishanth Aravamudan * once it is out of use. It also does not allow the counters to 630d1c3fb1fSNishanth Aravamudan * go out of whack in adjust_pool_surplus() as we don't modify 631d1c3fb1fSNishanth Aravamudan * the node values until we've gotten the hugepage and only the 632d1c3fb1fSNishanth Aravamudan * per-node value is checked there. 
633d1c3fb1fSNishanth Aravamudan */ 634d1c3fb1fSNishanth Aravamudan spin_lock(&hugetlb_lock); 635a5516438SAndi Kleen if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 636d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 637d1c3fb1fSNishanth Aravamudan return NULL; 638d1c3fb1fSNishanth Aravamudan } else { 639a5516438SAndi Kleen h->nr_huge_pages++; 640a5516438SAndi Kleen h->surplus_huge_pages++; 641d1c3fb1fSNishanth Aravamudan } 642d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 643d1c3fb1fSNishanth Aravamudan 644551883aeSNishanth Aravamudan page = alloc_pages(htlb_alloc_mask|__GFP_COMP| 645551883aeSNishanth Aravamudan __GFP_REPEAT|__GFP_NOWARN, 646a5516438SAndi Kleen huge_page_order(h)); 647d1c3fb1fSNishanth Aravamudan 6487893d1d5SAdam Litke spin_lock(&hugetlb_lock); 649d1c3fb1fSNishanth Aravamudan if (page) { 6502668db91SAdam Litke /* 6512668db91SAdam Litke * This page is now managed by the hugetlb allocator and has 6522668db91SAdam Litke * no users -- drop the buddy allocator's reference. 6532668db91SAdam Litke */ 6542668db91SAdam Litke put_page_testzero(page); 6552668db91SAdam Litke VM_BUG_ON(page_count(page)); 656d1c3fb1fSNishanth Aravamudan nid = page_to_nid(page); 657d1c3fb1fSNishanth Aravamudan set_compound_page_dtor(page, free_huge_page); 658d1c3fb1fSNishanth Aravamudan /* 659d1c3fb1fSNishanth Aravamudan * We incremented the global counters already 660d1c3fb1fSNishanth Aravamudan */ 661a5516438SAndi Kleen h->nr_huge_pages_node[nid]++; 662a5516438SAndi Kleen h->surplus_huge_pages_node[nid]++; 6633b116300SAdam Litke __count_vm_event(HTLB_BUDDY_PGALLOC); 664d1c3fb1fSNishanth Aravamudan } else { 665a5516438SAndi Kleen h->nr_huge_pages--; 666a5516438SAndi Kleen h->surplus_huge_pages--; 6673b116300SAdam Litke __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 6687893d1d5SAdam Litke } 669d1c3fb1fSNishanth Aravamudan spin_unlock(&hugetlb_lock); 6707893d1d5SAdam Litke 6717893d1d5SAdam Litke return page; 6727893d1d5SAdam Litke } 6737893d1d5SAdam Litke 674e4e574b7SAdam Litke /* 675e4e574b7SAdam Litke * Increase the hugetlb pool such that it can accomodate a reservation 676e4e574b7SAdam Litke * of size 'delta'. 677e4e574b7SAdam Litke */ 678a5516438SAndi Kleen static int gather_surplus_pages(struct hstate *h, int delta) 679e4e574b7SAdam Litke { 680e4e574b7SAdam Litke struct list_head surplus_list; 681e4e574b7SAdam Litke struct page *page, *tmp; 682e4e574b7SAdam Litke int ret, i; 683e4e574b7SAdam Litke int needed, allocated; 684e4e574b7SAdam Litke 685a5516438SAndi Kleen needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 686ac09b3a1SAdam Litke if (needed <= 0) { 687a5516438SAndi Kleen h->resv_huge_pages += delta; 688e4e574b7SAdam Litke return 0; 689ac09b3a1SAdam Litke } 690e4e574b7SAdam Litke 691e4e574b7SAdam Litke allocated = 0; 692e4e574b7SAdam Litke INIT_LIST_HEAD(&surplus_list); 693e4e574b7SAdam Litke 694e4e574b7SAdam Litke ret = -ENOMEM; 695e4e574b7SAdam Litke retry: 696e4e574b7SAdam Litke spin_unlock(&hugetlb_lock); 697e4e574b7SAdam Litke for (i = 0; i < needed; i++) { 698a5516438SAndi Kleen page = alloc_buddy_huge_page(h, NULL, 0); 699e4e574b7SAdam Litke if (!page) { 700e4e574b7SAdam Litke /* 701e4e574b7SAdam Litke * We were not able to allocate enough pages to 702e4e574b7SAdam Litke * satisfy the entire reservation so we free what 703e4e574b7SAdam Litke * we've allocated so far. 
704e4e574b7SAdam Litke */ 705e4e574b7SAdam Litke spin_lock(&hugetlb_lock); 706e4e574b7SAdam Litke needed = 0; 707e4e574b7SAdam Litke goto free; 708e4e574b7SAdam Litke } 709e4e574b7SAdam Litke 710e4e574b7SAdam Litke list_add(&page->lru, &surplus_list); 711e4e574b7SAdam Litke } 712e4e574b7SAdam Litke allocated += needed; 713e4e574b7SAdam Litke 714e4e574b7SAdam Litke /* 715e4e574b7SAdam Litke * After retaking hugetlb_lock, we need to recalculate 'needed' 716e4e574b7SAdam Litke * because either resv_huge_pages or free_huge_pages may have changed. 717e4e574b7SAdam Litke */ 718e4e574b7SAdam Litke spin_lock(&hugetlb_lock); 719a5516438SAndi Kleen needed = (h->resv_huge_pages + delta) - 720a5516438SAndi Kleen (h->free_huge_pages + allocated); 721e4e574b7SAdam Litke if (needed > 0) 722e4e574b7SAdam Litke goto retry; 723e4e574b7SAdam Litke 724e4e574b7SAdam Litke /* 725e4e574b7SAdam Litke * The surplus_list now contains _at_least_ the number of extra pages 726e4e574b7SAdam Litke * needed to accomodate the reservation. Add the appropriate number 727e4e574b7SAdam Litke * of pages to the hugetlb pool and free the extras back to the buddy 728ac09b3a1SAdam Litke * allocator. Commit the entire reservation here to prevent another 729ac09b3a1SAdam Litke * process from stealing the pages as they are added to the pool but 730ac09b3a1SAdam Litke * before they are reserved. 731e4e574b7SAdam Litke */ 732e4e574b7SAdam Litke needed += allocated; 733a5516438SAndi Kleen h->resv_huge_pages += delta; 734e4e574b7SAdam Litke ret = 0; 735e4e574b7SAdam Litke free: 73619fc3f0aSAdam Litke /* Free the needed pages to the hugetlb pool */ 73719fc3f0aSAdam Litke list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 73819fc3f0aSAdam Litke if ((--needed) < 0) 73919fc3f0aSAdam Litke break; 74019fc3f0aSAdam Litke list_del(&page->lru); 741a5516438SAndi Kleen enqueue_huge_page(h, page); 74219fc3f0aSAdam Litke } 74319fc3f0aSAdam Litke 74419fc3f0aSAdam Litke /* Free unnecessary surplus pages to the buddy allocator */ 74519fc3f0aSAdam Litke if (!list_empty(&surplus_list)) { 74619fc3f0aSAdam Litke spin_unlock(&hugetlb_lock); 747e4e574b7SAdam Litke list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 748e4e574b7SAdam Litke list_del(&page->lru); 749af767cbdSAdam Litke /* 7502668db91SAdam Litke * The page has a reference count of zero already, so 7512668db91SAdam Litke * call free_huge_page directly instead of using 7522668db91SAdam Litke * put_page. This must be done with hugetlb_lock 753af767cbdSAdam Litke * unlocked which is safe because free_huge_page takes 754af767cbdSAdam Litke * hugetlb_lock before deciding how to free the page. 755af767cbdSAdam Litke */ 7562668db91SAdam Litke free_huge_page(page); 757af767cbdSAdam Litke } 75819fc3f0aSAdam Litke spin_lock(&hugetlb_lock); 759e4e574b7SAdam Litke } 760e4e574b7SAdam Litke 761e4e574b7SAdam Litke return ret; 762e4e574b7SAdam Litke } 763e4e574b7SAdam Litke 764e4e574b7SAdam Litke /* 765e4e574b7SAdam Litke * When releasing a hugetlb pool reservation, any surplus pages that were 766e4e574b7SAdam Litke * allocated to satisfy the reservation must be explicitly freed if they were 767e4e574b7SAdam Litke * never used. 
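 * Called with hugetlb_lock held: the reservation is uncommitted first and
 * then up to that many surplus pages are freed, spread across the online
 * nodes.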
768e4e574b7SAdam Litke */ 769a5516438SAndi Kleen static void return_unused_surplus_pages(struct hstate *h, 770a5516438SAndi Kleen unsigned long unused_resv_pages) 771e4e574b7SAdam Litke { 772e4e574b7SAdam Litke static int nid = -1; 773e4e574b7SAdam Litke struct page *page; 774e4e574b7SAdam Litke unsigned long nr_pages; 775e4e574b7SAdam Litke 77611320d17SNishanth Aravamudan /* 77711320d17SNishanth Aravamudan * We want to release as many surplus pages as possible, spread 77811320d17SNishanth Aravamudan * evenly across all nodes. Iterate across all nodes until we 77911320d17SNishanth Aravamudan * can no longer free unreserved surplus pages. This occurs when 78011320d17SNishanth Aravamudan * the nodes with surplus pages have no free pages. 78111320d17SNishanth Aravamudan */ 78211320d17SNishanth Aravamudan unsigned long remaining_iterations = num_online_nodes(); 78311320d17SNishanth Aravamudan 784ac09b3a1SAdam Litke /* Uncommit the reservation */ 785a5516438SAndi Kleen h->resv_huge_pages -= unused_resv_pages; 786ac09b3a1SAdam Litke 787a5516438SAndi Kleen nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 788e4e574b7SAdam Litke 78911320d17SNishanth Aravamudan while (remaining_iterations-- && nr_pages) { 790e4e574b7SAdam Litke nid = next_node(nid, node_online_map); 791e4e574b7SAdam Litke if (nid == MAX_NUMNODES) 792e4e574b7SAdam Litke nid = first_node(node_online_map); 793e4e574b7SAdam Litke 794a5516438SAndi Kleen if (!h->surplus_huge_pages_node[nid]) 795e4e574b7SAdam Litke continue; 796e4e574b7SAdam Litke 797a5516438SAndi Kleen if (!list_empty(&h->hugepage_freelists[nid])) { 798a5516438SAndi Kleen page = list_entry(h->hugepage_freelists[nid].next, 799e4e574b7SAdam Litke struct page, lru); 800e4e574b7SAdam Litke list_del(&page->lru); 801a5516438SAndi Kleen update_and_free_page(h, page); 802a5516438SAndi Kleen h->free_huge_pages--; 803a5516438SAndi Kleen h->free_huge_pages_node[nid]--; 804a5516438SAndi Kleen h->surplus_huge_pages--; 805a5516438SAndi Kleen h->surplus_huge_pages_node[nid]--; 806e4e574b7SAdam Litke nr_pages--; 80711320d17SNishanth Aravamudan remaining_iterations = num_online_nodes(); 808e4e574b7SAdam Litke } 809e4e574b7SAdam Litke } 810e4e574b7SAdam Litke } 811e4e574b7SAdam Litke 812c37f9fb1SAndy Whitcroft /* 813c37f9fb1SAndy Whitcroft * Determine if the huge page at addr within the vma has an associated 814c37f9fb1SAndy Whitcroft * reservation. Where it does not we will need to logically increase 815c37f9fb1SAndy Whitcroft * reservation and actually increase quota before an allocation can occur. 816c37f9fb1SAndy Whitcroft * Where any new reservation would be required the reservation change is 817c37f9fb1SAndy Whitcroft * prepared, but not committed. Once the page has been quota'd, allocated 818c37f9fb1SAndy Whitcroft * and instantiated, the change should be committed via vma_commit_reservation. 819c37f9fb1SAndy Whitcroft * No action is required on failure.
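 * Returns the number of huge pages that still need to be reserved for
 * this fault: the region_chg() result for a shared mapping, 1 for a
 * private mapping whose caller is not the reservation owner, 0 for the
 * owner (whose reserve was taken at mmap time), or a negative errno.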
820c37f9fb1SAndy Whitcroft */ 821a5516438SAndi Kleen static int vma_needs_reservation(struct hstate *h, 822a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long addr) 823c37f9fb1SAndy Whitcroft { 824c37f9fb1SAndy Whitcroft struct address_space *mapping = vma->vm_file->f_mapping; 825c37f9fb1SAndy Whitcroft struct inode *inode = mapping->host; 826c37f9fb1SAndy Whitcroft 827c37f9fb1SAndy Whitcroft if (vma->vm_flags & VM_SHARED) { 828a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 829c37f9fb1SAndy Whitcroft return region_chg(&inode->i_mapping->private_list, 830c37f9fb1SAndy Whitcroft idx, idx + 1); 831c37f9fb1SAndy Whitcroft 83284afd99bSAndy Whitcroft } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 833c37f9fb1SAndy Whitcroft return 1; 834c37f9fb1SAndy Whitcroft 83584afd99bSAndy Whitcroft } else { 83684afd99bSAndy Whitcroft int err; 837a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 83884afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 83984afd99bSAndy Whitcroft 84084afd99bSAndy Whitcroft err = region_chg(&reservations->regions, idx, idx + 1); 84184afd99bSAndy Whitcroft if (err < 0) 84284afd99bSAndy Whitcroft return err; 843c37f9fb1SAndy Whitcroft return 0; 844c37f9fb1SAndy Whitcroft } 84584afd99bSAndy Whitcroft } 846a5516438SAndi Kleen static void vma_commit_reservation(struct hstate *h, 847a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long addr) 848c37f9fb1SAndy Whitcroft { 849c37f9fb1SAndy Whitcroft struct address_space *mapping = vma->vm_file->f_mapping; 850c37f9fb1SAndy Whitcroft struct inode *inode = mapping->host; 851c37f9fb1SAndy Whitcroft 852c37f9fb1SAndy Whitcroft if (vma->vm_flags & VM_SHARED) { 853a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 854c37f9fb1SAndy Whitcroft region_add(&inode->i_mapping->private_list, idx, idx + 1); 85584afd99bSAndy Whitcroft 85684afd99bSAndy Whitcroft } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 857a5516438SAndi Kleen pgoff_t idx = vma_hugecache_offset(h, vma, addr); 85884afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 85984afd99bSAndy Whitcroft 86084afd99bSAndy Whitcroft /* Mark this page used in the map. */ 86184afd99bSAndy Whitcroft region_add(&reservations->regions, idx, idx + 1); 862c37f9fb1SAndy Whitcroft } 863c37f9fb1SAndy Whitcroft } 864c37f9fb1SAndy Whitcroft 865348ea204SAdam Litke static struct page *alloc_huge_page(struct vm_area_struct *vma, 86604f2cbe3SMel Gorman unsigned long addr, int avoid_reserve) 867348ea204SAdam Litke { 868a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 869348ea204SAdam Litke struct page *page; 8702fc39cecSAdam Litke struct address_space *mapping = vma->vm_file->f_mapping; 871a1e78772SMel Gorman struct inode *inode = mapping->host; 872c37f9fb1SAndy Whitcroft unsigned int chg; 8732fc39cecSAdam Litke 874a1e78772SMel Gorman /* 875a1e78772SMel Gorman * Processes that did not create the mapping will have no reserves and 876a1e78772SMel Gorman * will not have accounted against quota. Check that the quota can be 877a1e78772SMel Gorman * made before satisfying the allocation 878c37f9fb1SAndy Whitcroft * MAP_NORESERVE mappings may also need pages and quota allocated 879c37f9fb1SAndy Whitcroft * if no reserve mapping overlaps. 
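 * chg below is the number of huge pages that must be charged to the
 * filesystem quota now; a fault covered by an existing reservation sees
 * chg == 0 because its quota was charged when the reservation was set up.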
880a1e78772SMel Gorman */ 881a5516438SAndi Kleen chg = vma_needs_reservation(h, vma, addr); 882c37f9fb1SAndy Whitcroft if (chg < 0) 883c37f9fb1SAndy Whitcroft return ERR_PTR(chg); 884c37f9fb1SAndy Whitcroft if (chg) 885a1e78772SMel Gorman if (hugetlb_get_quota(inode->i_mapping, chg)) 886a1e78772SMel Gorman return ERR_PTR(-ENOSPC); 88790d8b7e6SAdam Litke 888a1e78772SMel Gorman spin_lock(&hugetlb_lock); 889a5516438SAndi Kleen page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve); 890a1e78772SMel Gorman spin_unlock(&hugetlb_lock); 891a1e78772SMel Gorman 892a1e78772SMel Gorman if (!page) { 893a5516438SAndi Kleen page = alloc_buddy_huge_page(h, vma, addr); 894a1e78772SMel Gorman if (!page) { 895a1e78772SMel Gorman hugetlb_put_quota(inode->i_mapping, chg); 896a1e78772SMel Gorman return ERR_PTR(-VM_FAULT_OOM); 897a1e78772SMel Gorman } 898a1e78772SMel Gorman } 899a1e78772SMel Gorman 900348ea204SAdam Litke set_page_refcounted(page); 9012fc39cecSAdam Litke set_page_private(page, (unsigned long) mapping); 902a1e78772SMel Gorman 903a5516438SAndi Kleen vma_commit_reservation(h, vma, addr); 904c37f9fb1SAndy Whitcroft 9057893d1d5SAdam Litke return page; 906b45b5bd6SDavid Gibson } 907b45b5bd6SDavid Gibson 908e5ff2159SAndi Kleen static void __init hugetlb_init_one_hstate(struct hstate *h) 9091da177e4SLinus Torvalds { 9101da177e4SLinus Torvalds unsigned long i; 9111da177e4SLinus Torvalds 912a5516438SAndi Kleen for (i = 0; i < MAX_NUMNODES; ++i) 913a5516438SAndi Kleen INIT_LIST_HEAD(&h->hugepage_freelists[i]); 914a5516438SAndi Kleen 915a5516438SAndi Kleen h->hugetlb_next_nid = first_node(node_online_map); 91663b4613cSNishanth Aravamudan 917e5ff2159SAndi Kleen for (i = 0; i < h->max_huge_pages; ++i) { 918a5516438SAndi Kleen if (!alloc_fresh_huge_page(h)) 9191da177e4SLinus Torvalds break; 9201da177e4SLinus Torvalds } 921e5ff2159SAndi Kleen h->max_huge_pages = h->free_huge_pages = h->nr_huge_pages = i; 922e5ff2159SAndi Kleen } 923e5ff2159SAndi Kleen 924e5ff2159SAndi Kleen static void __init hugetlb_init_hstates(void) 925e5ff2159SAndi Kleen { 926e5ff2159SAndi Kleen struct hstate *h; 927e5ff2159SAndi Kleen 928e5ff2159SAndi Kleen for_each_hstate(h) { 929e5ff2159SAndi Kleen hugetlb_init_one_hstate(h); 930e5ff2159SAndi Kleen } 931e5ff2159SAndi Kleen } 932e5ff2159SAndi Kleen 933e5ff2159SAndi Kleen static void __init report_hugepages(void) 934e5ff2159SAndi Kleen { 935e5ff2159SAndi Kleen struct hstate *h; 936e5ff2159SAndi Kleen 937e5ff2159SAndi Kleen for_each_hstate(h) { 938e5ff2159SAndi Kleen printk(KERN_INFO "Total HugeTLB memory allocated, " 939e5ff2159SAndi Kleen "%ld %dMB pages\n", 940e5ff2159SAndi Kleen h->free_huge_pages, 941e5ff2159SAndi Kleen 1 << (h->order + PAGE_SHIFT - 20)); 942e5ff2159SAndi Kleen } 943e5ff2159SAndi Kleen } 944e5ff2159SAndi Kleen 945e5ff2159SAndi Kleen static int __init hugetlb_init(void) 946e5ff2159SAndi Kleen { 947e5ff2159SAndi Kleen BUILD_BUG_ON(HPAGE_SHIFT == 0); 948e5ff2159SAndi Kleen 949e5ff2159SAndi Kleen if (!size_to_hstate(HPAGE_SIZE)) { 950e5ff2159SAndi Kleen hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 951e5ff2159SAndi Kleen parsed_hstate->max_huge_pages = default_hstate_max_huge_pages; 952e5ff2159SAndi Kleen } 953e5ff2159SAndi Kleen default_hstate_idx = size_to_hstate(HPAGE_SIZE) - hstates; 954e5ff2159SAndi Kleen 955e5ff2159SAndi Kleen hugetlb_init_hstates(); 956e5ff2159SAndi Kleen 957e5ff2159SAndi Kleen report_hugepages(); 958e5ff2159SAndi Kleen 9591da177e4SLinus Torvalds return 0; 9601da177e4SLinus Torvalds } 9611da177e4SLinus Torvalds module_init(hugetlb_init); 
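/*
 * Illustrative boot usage (the exact "hugepagesz=" spelling is parsed by
 * architecture code): "hugepagesz=2M hugepages=512" first registers an
 * hstate via hugetlb_add_hstate() and then directs the following
 * "hugepages=" count at that hstate through parsed_hstate; a "hugepages="
 * seen before any "hugepagesz=" is kept in default_hstate_max_huge_pages
 * and applied to the default huge page size in hugetlb_init().
 */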
9621da177e4SLinus Torvalds 963e5ff2159SAndi Kleen /* Should be called on processing a hugepagesz=... option */ 964e5ff2159SAndi Kleen void __init hugetlb_add_hstate(unsigned order) 965e5ff2159SAndi Kleen { 966e5ff2159SAndi Kleen struct hstate *h; 967e5ff2159SAndi Kleen if (size_to_hstate(PAGE_SIZE << order)) { 968e5ff2159SAndi Kleen printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n"); 969e5ff2159SAndi Kleen return; 970e5ff2159SAndi Kleen } 971e5ff2159SAndi Kleen BUG_ON(max_hstate >= HUGE_MAX_HSTATE); 972e5ff2159SAndi Kleen BUG_ON(order == 0); 973e5ff2159SAndi Kleen h = &hstates[max_hstate++]; 974e5ff2159SAndi Kleen h->order = order; 975e5ff2159SAndi Kleen h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); 976e5ff2159SAndi Kleen hugetlb_init_one_hstate(h); 977e5ff2159SAndi Kleen parsed_hstate = h; 978e5ff2159SAndi Kleen } 979e5ff2159SAndi Kleen 9801da177e4SLinus Torvalds static int __init hugetlb_setup(char *s) 9811da177e4SLinus Torvalds { 982e5ff2159SAndi Kleen unsigned long *mhp; 983e5ff2159SAndi Kleen 984e5ff2159SAndi Kleen /* 985e5ff2159SAndi Kleen * !max_hstate means we haven't parsed a hugepagesz= parameter yet, 986e5ff2159SAndi Kleen * so this hugepages= parameter goes to the "default hstate". 987e5ff2159SAndi Kleen */ 988e5ff2159SAndi Kleen if (!max_hstate) 989e5ff2159SAndi Kleen mhp = &default_hstate_max_huge_pages; 990e5ff2159SAndi Kleen else 991e5ff2159SAndi Kleen mhp = &parsed_hstate->max_huge_pages; 992e5ff2159SAndi Kleen 993e5ff2159SAndi Kleen if (sscanf(s, "%lu", mhp) <= 0) 994e5ff2159SAndi Kleen *mhp = 0; 995e5ff2159SAndi Kleen 9961da177e4SLinus Torvalds return 1; 9971da177e4SLinus Torvalds } 9981da177e4SLinus Torvalds __setup("hugepages=", hugetlb_setup); 9991da177e4SLinus Torvalds 10008a630112SKen Chen static unsigned int cpuset_mems_nr(unsigned int *array) 10018a630112SKen Chen { 10028a630112SKen Chen int node; 10038a630112SKen Chen unsigned int nr = 0; 10048a630112SKen Chen 10058a630112SKen Chen for_each_node_mask(node, cpuset_current_mems_allowed) 10068a630112SKen Chen nr += array[node]; 10078a630112SKen Chen 10088a630112SKen Chen return nr; 10098a630112SKen Chen } 10108a630112SKen Chen 10111da177e4SLinus Torvalds #ifdef CONFIG_SYSCTL 10121da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM 1013a5516438SAndi Kleen static void try_to_free_low(struct hstate *h, unsigned long count) 10141da177e4SLinus Torvalds { 10154415cc8dSChristoph Lameter int i; 10164415cc8dSChristoph Lameter 10171da177e4SLinus Torvalds for (i = 0; i < MAX_NUMNODES; ++i) { 10181da177e4SLinus Torvalds struct page *page, *next; 1019a5516438SAndi Kleen struct list_head *freel = &h->hugepage_freelists[i]; 1020a5516438SAndi Kleen list_for_each_entry_safe(page, next, freel, lru) { 1021a5516438SAndi Kleen if (count >= h->nr_huge_pages) 10226b0c880dSAdam Litke return; 10231da177e4SLinus Torvalds if (PageHighMem(page)) 10241da177e4SLinus Torvalds continue; 10251da177e4SLinus Torvalds list_del(&page->lru); 1026e5ff2159SAndi Kleen update_and_free_page(h, page); 1027a5516438SAndi Kleen h->free_huge_pages--; 1028a5516438SAndi Kleen h->free_huge_pages_node[page_to_nid(page)]--; 10291da177e4SLinus Torvalds } 10301da177e4SLinus Torvalds } 10311da177e4SLinus Torvalds } 10321da177e4SLinus Torvalds #else 1033a5516438SAndi Kleen static inline void try_to_free_low(struct hstate *h, unsigned long count) 10341da177e4SLinus Torvalds { 10351da177e4SLinus Torvalds } 10361da177e4SLinus Torvalds #endif 10371da177e4SLinus Torvalds 1038a5516438SAndi Kleen #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 
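/*
 * "Persistent" pages are the ones the administrator asked for: e.g. with
 * nr_huge_pages == 10 and surplus_huge_pages == 3 there are 7 persistent
 * pages.  set_max_huge_pages() below drives this value towards 'count'.
 */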
1039e5ff2159SAndi Kleen static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count) 10401da177e4SLinus Torvalds { 10417893d1d5SAdam Litke unsigned long min_count, ret; 10421da177e4SLinus Torvalds 10437893d1d5SAdam Litke /* 10447893d1d5SAdam Litke * Increase the pool size 10457893d1d5SAdam Litke * First take pages out of surplus state. Then make up the 10467893d1d5SAdam Litke * remaining difference by allocating fresh huge pages. 1047d1c3fb1fSNishanth Aravamudan * 1048d1c3fb1fSNishanth Aravamudan * We might race with alloc_buddy_huge_page() here and be unable 1049d1c3fb1fSNishanth Aravamudan * to convert a surplus huge page to a normal huge page. That is 1050d1c3fb1fSNishanth Aravamudan * not critical, though, it just means the overall size of the 1051d1c3fb1fSNishanth Aravamudan * pool might be one hugepage larger than it needs to be, but 1052d1c3fb1fSNishanth Aravamudan * within all the constraints specified by the sysctls. 10537893d1d5SAdam Litke */ 10541da177e4SLinus Torvalds spin_lock(&hugetlb_lock); 1055a5516438SAndi Kleen while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 1056a5516438SAndi Kleen if (!adjust_pool_surplus(h, -1)) 10577893d1d5SAdam Litke break; 10587893d1d5SAdam Litke } 10597893d1d5SAdam Litke 1060a5516438SAndi Kleen while (count > persistent_huge_pages(h)) { 10617893d1d5SAdam Litke /* 10627893d1d5SAdam Litke * If this allocation races such that we no longer need the 10637893d1d5SAdam Litke * page, free_huge_page will handle it by freeing the page 10647893d1d5SAdam Litke * and reducing the surplus. 10657893d1d5SAdam Litke */ 10667893d1d5SAdam Litke spin_unlock(&hugetlb_lock); 1067a5516438SAndi Kleen ret = alloc_fresh_huge_page(h); 10687893d1d5SAdam Litke spin_lock(&hugetlb_lock); 10697893d1d5SAdam Litke if (!ret) 10707893d1d5SAdam Litke goto out; 10717893d1d5SAdam Litke 10727893d1d5SAdam Litke } 10737893d1d5SAdam Litke 10747893d1d5SAdam Litke /* 10757893d1d5SAdam Litke * Decrease the pool size 10767893d1d5SAdam Litke * First return free pages to the buddy allocator (being careful 10777893d1d5SAdam Litke * to keep enough around to satisfy reservations). Then place 10787893d1d5SAdam Litke * pages into surplus state as needed so the pool will shrink 10797893d1d5SAdam Litke * to the desired size as pages become free. 1080d1c3fb1fSNishanth Aravamudan * 1081d1c3fb1fSNishanth Aravamudan * By placing pages into the surplus state independent of the 1082d1c3fb1fSNishanth Aravamudan * overcommit value, we are allowing the surplus pool size to 1083d1c3fb1fSNishanth Aravamudan * exceed overcommit. There are few sane options here. Since 1084d1c3fb1fSNishanth Aravamudan * alloc_buddy_huge_page() is checking the global counter, 1085d1c3fb1fSNishanth Aravamudan * though, we'll note that we're not allowed to exceed surplus 1086d1c3fb1fSNishanth Aravamudan * and won't grow the pool anywhere else. Not until one of the 1087d1c3fb1fSNishanth Aravamudan * sysctls are changed, or the surplus pages go out of use. 
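 * min_count computed below is the floor we may not shrink past: the
 * reserved pages plus the pages currently handed out to users
 * (nr_huge_pages - free_huge_pages).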
10887893d1d5SAdam Litke */ 1089a5516438SAndi Kleen min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 10906b0c880dSAdam Litke min_count = max(count, min_count); 1091a5516438SAndi Kleen try_to_free_low(h, min_count); 1092a5516438SAndi Kleen while (min_count < persistent_huge_pages(h)) { 1093a5516438SAndi Kleen struct page *page = dequeue_huge_page(h); 10941da177e4SLinus Torvalds if (!page) 10951da177e4SLinus Torvalds break; 1096a5516438SAndi Kleen update_and_free_page(h, page); 10971da177e4SLinus Torvalds } 1098a5516438SAndi Kleen while (count < persistent_huge_pages(h)) { 1099a5516438SAndi Kleen if (!adjust_pool_surplus(h, 1)) 11007893d1d5SAdam Litke break; 11017893d1d5SAdam Litke } 11027893d1d5SAdam Litke out: 1103a5516438SAndi Kleen ret = persistent_huge_pages(h); 11041da177e4SLinus Torvalds spin_unlock(&hugetlb_lock); 11057893d1d5SAdam Litke return ret; 11061da177e4SLinus Torvalds } 11071da177e4SLinus Torvalds 11081da177e4SLinus Torvalds int hugetlb_sysctl_handler(struct ctl_table *table, int write, 11091da177e4SLinus Torvalds struct file *file, void __user *buffer, 11101da177e4SLinus Torvalds size_t *length, loff_t *ppos) 11111da177e4SLinus Torvalds { 1112e5ff2159SAndi Kleen struct hstate *h = &default_hstate; 1113e5ff2159SAndi Kleen unsigned long tmp; 1114e5ff2159SAndi Kleen 1115e5ff2159SAndi Kleen if (!write) 1116e5ff2159SAndi Kleen tmp = h->max_huge_pages; 1117e5ff2159SAndi Kleen 1118e5ff2159SAndi Kleen table->data = &tmp; 1119e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 11201da177e4SLinus Torvalds proc_doulongvec_minmax(table, write, file, buffer, length, ppos); 1121e5ff2159SAndi Kleen 1122e5ff2159SAndi Kleen if (write) 1123e5ff2159SAndi Kleen h->max_huge_pages = set_max_huge_pages(h, tmp); 1124e5ff2159SAndi Kleen 11251da177e4SLinus Torvalds return 0; 11261da177e4SLinus Torvalds } 1127396faf03SMel Gorman 1128396faf03SMel Gorman int hugetlb_treat_movable_handler(struct ctl_table *table, int write, 1129396faf03SMel Gorman struct file *file, void __user *buffer, 1130396faf03SMel Gorman size_t *length, loff_t *ppos) 1131396faf03SMel Gorman { 1132396faf03SMel Gorman proc_dointvec(table, write, file, buffer, length, ppos); 1133396faf03SMel Gorman if (hugepages_treat_as_movable) 1134396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER_MOVABLE; 1135396faf03SMel Gorman else 1136396faf03SMel Gorman htlb_alloc_mask = GFP_HIGHUSER; 1137396faf03SMel Gorman return 0; 1138396faf03SMel Gorman } 1139396faf03SMel Gorman 1140a3d0c6aaSNishanth Aravamudan int hugetlb_overcommit_handler(struct ctl_table *table, int write, 1141a3d0c6aaSNishanth Aravamudan struct file *file, void __user *buffer, 1142a3d0c6aaSNishanth Aravamudan size_t *length, loff_t *ppos) 1143a3d0c6aaSNishanth Aravamudan { 1144a5516438SAndi Kleen struct hstate *h = &default_hstate; 1145e5ff2159SAndi Kleen unsigned long tmp; 1146e5ff2159SAndi Kleen 1147e5ff2159SAndi Kleen if (!write) 1148e5ff2159SAndi Kleen tmp = h->nr_overcommit_huge_pages; 1149e5ff2159SAndi Kleen 1150e5ff2159SAndi Kleen table->data = &tmp; 1151e5ff2159SAndi Kleen table->maxlen = sizeof(unsigned long); 1152a3d0c6aaSNishanth Aravamudan proc_doulongvec_minmax(table, write, file, buffer, length, ppos); 1153e5ff2159SAndi Kleen 1154e5ff2159SAndi Kleen if (write) { 1155064d9efeSNishanth Aravamudan spin_lock(&hugetlb_lock); 1156e5ff2159SAndi Kleen h->nr_overcommit_huge_pages = tmp; 1157a3d0c6aaSNishanth Aravamudan spin_unlock(&hugetlb_lock); 1158e5ff2159SAndi Kleen } 1159e5ff2159SAndi Kleen 1160a3d0c6aaSNishanth Aravamudan return 0; 
1161a3d0c6aaSNishanth Aravamudan } 1162a3d0c6aaSNishanth Aravamudan 11631da177e4SLinus Torvalds #endif /* CONFIG_SYSCTL */ 11641da177e4SLinus Torvalds 11651da177e4SLinus Torvalds int hugetlb_report_meminfo(char *buf) 11661da177e4SLinus Torvalds { 1167a5516438SAndi Kleen struct hstate *h = &default_hstate; 11681da177e4SLinus Torvalds return sprintf(buf, 11691da177e4SLinus Torvalds "HugePages_Total: %5lu\n" 11701da177e4SLinus Torvalds "HugePages_Free: %5lu\n" 1171b45b5bd6SDavid Gibson "HugePages_Rsvd: %5lu\n" 11727893d1d5SAdam Litke "HugePages_Surp: %5lu\n" 11731da177e4SLinus Torvalds "Hugepagesize: %5lu kB\n", 1174a5516438SAndi Kleen h->nr_huge_pages, 1175a5516438SAndi Kleen h->free_huge_pages, 1176a5516438SAndi Kleen h->resv_huge_pages, 1177a5516438SAndi Kleen h->surplus_huge_pages, 1178a5516438SAndi Kleen 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); 11791da177e4SLinus Torvalds } 11801da177e4SLinus Torvalds 11811da177e4SLinus Torvalds int hugetlb_report_node_meminfo(int nid, char *buf) 11821da177e4SLinus Torvalds { 1183a5516438SAndi Kleen struct hstate *h = &default_hstate; 11841da177e4SLinus Torvalds return sprintf(buf, 11851da177e4SLinus Torvalds "Node %d HugePages_Total: %5u\n" 1186a1de0919SNishanth Aravamudan "Node %d HugePages_Free: %5u\n" 1187a1de0919SNishanth Aravamudan "Node %d HugePages_Surp: %5u\n", 1188a5516438SAndi Kleen nid, h->nr_huge_pages_node[nid], 1189a5516438SAndi Kleen nid, h->free_huge_pages_node[nid], 1190a5516438SAndi Kleen nid, h->surplus_huge_pages_node[nid]); 11911da177e4SLinus Torvalds } 11921da177e4SLinus Torvalds 11931da177e4SLinus Torvalds /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */ 11941da177e4SLinus Torvalds unsigned long hugetlb_total_pages(void) 11951da177e4SLinus Torvalds { 1196a5516438SAndi Kleen struct hstate *h = &default_hstate; 1197a5516438SAndi Kleen return h->nr_huge_pages * pages_per_huge_page(h); 11981da177e4SLinus Torvalds } 11991da177e4SLinus Torvalds 1200a5516438SAndi Kleen static int hugetlb_acct_memory(struct hstate *h, long delta) 1201fc1b8a73SMel Gorman { 1202fc1b8a73SMel Gorman int ret = -ENOMEM; 1203fc1b8a73SMel Gorman 1204fc1b8a73SMel Gorman spin_lock(&hugetlb_lock); 1205fc1b8a73SMel Gorman /* 1206fc1b8a73SMel Gorman * When cpuset is configured, it breaks the strict hugetlb page 1207fc1b8a73SMel Gorman * reservation as the accounting is done on a global variable. Such 1208fc1b8a73SMel Gorman * reservation is completely rubbish in the presence of cpuset because 1209fc1b8a73SMel Gorman * the reservation is not checked against page availability for the 1210fc1b8a73SMel Gorman * current cpuset. An application can still potentially be OOM'ed by the 1211fc1b8a73SMel Gorman * kernel due to a lack of free htlb pages in the cpuset that the task is in. 1212fc1b8a73SMel Gorman * Attempting to enforce strict accounting with cpusets is almost 1213fc1b8a73SMel Gorman * impossible (or too ugly) because cpusets are so fluid that 1214fc1b8a73SMel Gorman * tasks or memory nodes can be dynamically moved between them. 1215fc1b8a73SMel Gorman * 1216fc1b8a73SMel Gorman * The change of semantics for shared hugetlb mapping with cpuset is 1217fc1b8a73SMel Gorman * undesirable. However, in order to preserve some of the semantics, 1218fc1b8a73SMel Gorman * we fall back to checking against current free page availability as 1219fc1b8a73SMel Gorman * a best attempt and hopefully to minimize the impact of changing 1220fc1b8a73SMel Gorman * semantics that cpuset has.
1221fc1b8a73SMel Gorman */ 1222fc1b8a73SMel Gorman if (delta > 0) { 1223a5516438SAndi Kleen if (gather_surplus_pages(h, delta) < 0) 1224fc1b8a73SMel Gorman goto out; 1225fc1b8a73SMel Gorman 1226a5516438SAndi Kleen if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { 1227a5516438SAndi Kleen return_unused_surplus_pages(h, delta); 1228fc1b8a73SMel Gorman goto out; 1229fc1b8a73SMel Gorman } 1230fc1b8a73SMel Gorman } 1231fc1b8a73SMel Gorman 1232fc1b8a73SMel Gorman ret = 0; 1233fc1b8a73SMel Gorman if (delta < 0) 1234a5516438SAndi Kleen return_unused_surplus_pages(h, (unsigned long) -delta); 1235fc1b8a73SMel Gorman 1236fc1b8a73SMel Gorman out: 1237fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock); 1238fc1b8a73SMel Gorman return ret; 1239fc1b8a73SMel Gorman } 1240fc1b8a73SMel Gorman 124184afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma) 124284afd99bSAndy Whitcroft { 124384afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 124484afd99bSAndy Whitcroft 124584afd99bSAndy Whitcroft /* 124684afd99bSAndy Whitcroft * This new VMA should share its sibling's reservation map if present. 124784afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where 124884afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA 124984afd99bSAndy Whitcroft * has a reference to the reservation map it cannot disappear until 125084afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a 125184afd99bSAndy Whitcroft * new reference here without additional locking. 125284afd99bSAndy Whitcroft */ 125384afd99bSAndy Whitcroft if (reservations) 125484afd99bSAndy Whitcroft kref_get(&reservations->refs); 125584afd99bSAndy Whitcroft } 125684afd99bSAndy Whitcroft 1257a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma) 1258a1e78772SMel Gorman { 1259a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 126084afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 126184afd99bSAndy Whitcroft unsigned long reserve; 126284afd99bSAndy Whitcroft unsigned long start; 126384afd99bSAndy Whitcroft unsigned long end; 126484afd99bSAndy Whitcroft 126584afd99bSAndy Whitcroft if (reservations) { 1266a5516438SAndi Kleen start = vma_hugecache_offset(h, vma, vma->vm_start); 1267a5516438SAndi Kleen end = vma_hugecache_offset(h, vma, vma->vm_end); 126884afd99bSAndy Whitcroft 126984afd99bSAndy Whitcroft reserve = (end - start) - 127084afd99bSAndy Whitcroft region_count(&reservations->regions, start, end); 127184afd99bSAndy Whitcroft 127284afd99bSAndy Whitcroft kref_put(&reservations->refs, resv_map_release); 127384afd99bSAndy Whitcroft 1274a1e78772SMel Gorman if (reserve) 1275a5516438SAndi Kleen hugetlb_acct_memory(h, -reserve); 1276a1e78772SMel Gorman } 127784afd99bSAndy Whitcroft } 1278a1e78772SMel Gorman 12791da177e4SLinus Torvalds /* 12801da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. They cause 12811da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the 12821da177e4SLinus Torvalds * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 12831da177e4SLinus Torvalds * this far.
12841da177e4SLinus Torvalds */ 1285d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 12861da177e4SLinus Torvalds { 12871da177e4SLinus Torvalds BUG(); 1288d0217ac0SNick Piggin return 0; 12891da177e4SLinus Torvalds } 12901da177e4SLinus Torvalds 12911da177e4SLinus Torvalds struct vm_operations_struct hugetlb_vm_ops = { 1292d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 129384afd99bSAndy Whitcroft .open = hugetlb_vm_op_open, 1294a1e78772SMel Gorman .close = hugetlb_vm_op_close, 12951da177e4SLinus Torvalds }; 12961da177e4SLinus Torvalds 12971e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 12981e8f889bSDavid Gibson int writable) 129963551ae0SDavid Gibson { 130063551ae0SDavid Gibson pte_t entry; 130163551ae0SDavid Gibson 13021e8f889bSDavid Gibson if (writable) { 130363551ae0SDavid Gibson entry = 130463551ae0SDavid Gibson pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); 130563551ae0SDavid Gibson } else { 13067f2e9525SGerald Schaefer entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot)); 130763551ae0SDavid Gibson } 130863551ae0SDavid Gibson entry = pte_mkyoung(entry); 130963551ae0SDavid Gibson entry = pte_mkhuge(entry); 131063551ae0SDavid Gibson 131163551ae0SDavid Gibson return entry; 131263551ae0SDavid Gibson } 131363551ae0SDavid Gibson 13141e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 13151e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 13161e8f889bSDavid Gibson { 13171e8f889bSDavid Gibson pte_t entry; 13181e8f889bSDavid Gibson 13197f2e9525SGerald Schaefer entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); 13207f2e9525SGerald Schaefer if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) { 13211e8f889bSDavid Gibson update_mmu_cache(vma, address, entry); 13221e8f889bSDavid Gibson } 13238dab5241SBenjamin Herrenschmidt } 13241e8f889bSDavid Gibson 13251e8f889bSDavid Gibson 132663551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 132763551ae0SDavid Gibson struct vm_area_struct *vma) 132863551ae0SDavid Gibson { 132963551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 133063551ae0SDavid Gibson struct page *ptepage; 13311c59827dSHugh Dickins unsigned long addr; 13321e8f889bSDavid Gibson int cow; 1333a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1334a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 13351e8f889bSDavid Gibson 13361e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 133763551ae0SDavid Gibson 1338a5516438SAndi Kleen for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 1339c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 1340c74df32cSHugh Dickins if (!src_pte) 1341c74df32cSHugh Dickins continue; 1342a5516438SAndi Kleen dst_pte = huge_pte_alloc(dst, addr, sz); 134363551ae0SDavid Gibson if (!dst_pte) 134463551ae0SDavid Gibson goto nomem; 1345c5c99429SLarry Woodman 1346c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 1347c5c99429SLarry Woodman if (dst_pte == src_pte) 1348c5c99429SLarry Woodman continue; 1349c5c99429SLarry Woodman 1350c74df32cSHugh Dickins spin_lock(&dst->page_table_lock); 135146478758SNick Piggin spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); 13527f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(src_pte))) { 13531e8f889bSDavid Gibson if (cow) 13547f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte); 
13557f2e9525SGerald Schaefer entry = huge_ptep_get(src_pte); 135663551ae0SDavid Gibson ptepage = pte_page(entry); 135763551ae0SDavid Gibson get_page(ptepage); 135863551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 13591c59827dSHugh Dickins } 13601c59827dSHugh Dickins spin_unlock(&src->page_table_lock); 1361c74df32cSHugh Dickins spin_unlock(&dst->page_table_lock); 136263551ae0SDavid Gibson } 136363551ae0SDavid Gibson return 0; 136463551ae0SDavid Gibson 136563551ae0SDavid Gibson nomem: 136663551ae0SDavid Gibson return -ENOMEM; 136763551ae0SDavid Gibson } 136863551ae0SDavid Gibson 1369502717f4SChen, Kenneth W void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 137004f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 137163551ae0SDavid Gibson { 137263551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 137363551ae0SDavid Gibson unsigned long address; 1374c7546f8fSDavid Gibson pte_t *ptep; 137563551ae0SDavid Gibson pte_t pte; 137663551ae0SDavid Gibson struct page *page; 1377fe1668aeSChen, Kenneth W struct page *tmp; 1378a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1379a5516438SAndi Kleen unsigned long sz = huge_page_size(h); 1380a5516438SAndi Kleen 1381c0a499c2SChen, Kenneth W /* 1382c0a499c2SChen, Kenneth W * A page gathering list, protected by per file i_mmap_lock. The 1383c0a499c2SChen, Kenneth W * lock is used to avoid list corruption from multiple unmapping 1384c0a499c2SChen, Kenneth W * of the same page since we are using page->lru. 1385c0a499c2SChen, Kenneth W */ 1386fe1668aeSChen, Kenneth W LIST_HEAD(page_list); 138763551ae0SDavid Gibson 138863551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma)); 1389a5516438SAndi Kleen BUG_ON(start & ~huge_page_mask(h)); 1390a5516438SAndi Kleen BUG_ON(end & ~huge_page_mask(h)); 139163551ae0SDavid Gibson 1392508034a3SHugh Dickins spin_lock(&mm->page_table_lock); 1393a5516438SAndi Kleen for (address = start; address < end; address += sz) { 1394c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address); 1395c7546f8fSDavid Gibson if (!ptep) 1396c7546f8fSDavid Gibson continue; 1397c7546f8fSDavid Gibson 139839dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 139939dde65cSChen, Kenneth W continue; 140039dde65cSChen, Kenneth W 140104f2cbe3SMel Gorman /* 140204f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific 140304f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we 140404f2cbe3SMel Gorman * are about to unmap is the actual page of interest. 
140504f2cbe3SMel Gorman */ 140604f2cbe3SMel Gorman if (ref_page) { 140704f2cbe3SMel Gorman pte = huge_ptep_get(ptep); 140804f2cbe3SMel Gorman if (huge_pte_none(pte)) 140904f2cbe3SMel Gorman continue; 141004f2cbe3SMel Gorman page = pte_page(pte); 141104f2cbe3SMel Gorman if (page != ref_page) 141204f2cbe3SMel Gorman continue; 141304f2cbe3SMel Gorman 141404f2cbe3SMel Gorman /* 141504f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that 141604f2cbe3SMel Gorman * future faults in this VMA will fail rather than 141704f2cbe3SMel Gorman * looking like data was lost. 141804f2cbe3SMel Gorman */ 141904f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 142004f2cbe3SMel Gorman } 142104f2cbe3SMel Gorman 1422c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep); 14237f2e9525SGerald Schaefer if (huge_pte_none(pte)) 142463551ae0SDavid Gibson continue; 1425c7546f8fSDavid Gibson 142663551ae0SDavid Gibson page = pte_page(pte); 14276649a386SKen Chen if (pte_dirty(pte)) 14286649a386SKen Chen set_page_dirty(page); 1429fe1668aeSChen, Kenneth W list_add(&page->lru, &page_list); 143063551ae0SDavid Gibson } 14311da177e4SLinus Torvalds spin_unlock(&mm->page_table_lock); 1432508034a3SHugh Dickins flush_tlb_range(vma, start, end); 1433fe1668aeSChen, Kenneth W list_for_each_entry_safe(page, tmp, &page_list, lru) { 1434fe1668aeSChen, Kenneth W list_del(&page->lru); 1435fe1668aeSChen, Kenneth W put_page(page); 1436fe1668aeSChen, Kenneth W } 14371da177e4SLinus Torvalds } 143863551ae0SDavid Gibson 1439502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 144004f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 1441502717f4SChen, Kenneth W { 1442502717f4SChen, Kenneth W /* 1443502717f4SChen, Kenneth W * It is undesirable to test vma->vm_file as it should be non-null 1444502717f4SChen, Kenneth W * for a valid hugetlb area. However, vm_file will be NULL in the error 1445502717f4SChen, Kenneth W * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails, 1446502717f4SChen, Kenneth W * do_mmap_pgoff() nullifies vma->vm_file before calling this function 1447502717f4SChen, Kenneth W * to clean up. Since no pte has actually been set up, it is safe to 1448502717f4SChen, Kenneth W * do nothing in this case. 1449502717f4SChen, Kenneth W */ 1450502717f4SChen, Kenneth W if (vma->vm_file) { 1451502717f4SChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 145204f2cbe3SMel Gorman __unmap_hugepage_range(vma, start, end, ref_page); 1453502717f4SChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 1454502717f4SChen, Kenneth W } 1455502717f4SChen, Kenneth W } 1456502717f4SChen, Kenneth W 145704f2cbe3SMel Gorman /* 145804f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE 145904f2cbe3SMel Gorman * mapping it owns the reserve page for. The intention is to unmap the page 146004f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the 146104f2cbe3SMel Gorman * same region.
146204f2cbe3SMel Gorman */ 146304f2cbe3SMel Gorman int unmap_ref_private(struct mm_struct *mm, 146404f2cbe3SMel Gorman struct vm_area_struct *vma, 146504f2cbe3SMel Gorman struct page *page, 146604f2cbe3SMel Gorman unsigned long address) 146704f2cbe3SMel Gorman { 146804f2cbe3SMel Gorman struct vm_area_struct *iter_vma; 146904f2cbe3SMel Gorman struct address_space *mapping; 147004f2cbe3SMel Gorman struct prio_tree_iter iter; 147104f2cbe3SMel Gorman pgoff_t pgoff; 147204f2cbe3SMel Gorman 147304f2cbe3SMel Gorman /* 147404f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation 147504f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units. 147604f2cbe3SMel Gorman */ 147704f2cbe3SMel Gorman address = address & huge_page_mask(hstate_vma(vma)); 147804f2cbe3SMel Gorman pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) 147904f2cbe3SMel Gorman + (vma->vm_pgoff >> PAGE_SHIFT); 148004f2cbe3SMel Gorman mapping = (struct address_space *)page_private(page); 148104f2cbe3SMel Gorman 148204f2cbe3SMel Gorman vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 148304f2cbe3SMel Gorman /* Do not unmap the current VMA */ 148404f2cbe3SMel Gorman if (iter_vma == vma) 148504f2cbe3SMel Gorman continue; 148604f2cbe3SMel Gorman 148704f2cbe3SMel Gorman /* 148804f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves. 148904f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these 149004f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA 149104f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing 149204f2cbe3SMel Gorman * from the time of fork. This would look like data corruption 149304f2cbe3SMel Gorman */ 149404f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 149504f2cbe3SMel Gorman unmap_hugepage_range(iter_vma, 149604f2cbe3SMel Gorman address, address + HPAGE_SIZE, 149704f2cbe3SMel Gorman page); 149804f2cbe3SMel Gorman } 149904f2cbe3SMel Gorman 150004f2cbe3SMel Gorman return 1; 150104f2cbe3SMel Gorman } 150204f2cbe3SMel Gorman 15031e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 150404f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte, 150504f2cbe3SMel Gorman struct page *pagecache_page) 15061e8f889bSDavid Gibson { 1507a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 15081e8f889bSDavid Gibson struct page *old_page, *new_page; 150979ac6ba4SDavid Gibson int avoidcopy; 151004f2cbe3SMel Gorman int outside_reserve = 0; 15111e8f889bSDavid Gibson 15121e8f889bSDavid Gibson old_page = pte_page(pte); 15131e8f889bSDavid Gibson 151404f2cbe3SMel Gorman retry_avoidcopy: 15151e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy 15161e8f889bSDavid Gibson * and just make the page writable */ 15171e8f889bSDavid Gibson avoidcopy = (page_count(old_page) == 1); 15181e8f889bSDavid Gibson if (avoidcopy) { 15191e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep); 152083c54070SNick Piggin return 0; 15211e8f889bSDavid Gibson } 15221e8f889bSDavid Gibson 152304f2cbe3SMel Gorman /* 152404f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to 152504f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy 152604f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache 152704f2cbe3SMel Gorman * page is used to determine if the reserve at this address was 152804f2cbe3SMel Gorman * consumed or not. 
If reserves were used, a partially faulted mapping 152904f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead 153004f2cbe3SMel Gorman * of the full address range. 153104f2cbe3SMel Gorman */ 153204f2cbe3SMel Gorman if (!(vma->vm_flags & VM_SHARED) && 153304f2cbe3SMel Gorman is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 153404f2cbe3SMel Gorman old_page != pagecache_page) 153504f2cbe3SMel Gorman outside_reserve = 1; 153604f2cbe3SMel Gorman 15371e8f889bSDavid Gibson page_cache_get(old_page); 153804f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve); 15391e8f889bSDavid Gibson 15402fc39cecSAdam Litke if (IS_ERR(new_page)) { 15411e8f889bSDavid Gibson page_cache_release(old_page); 154204f2cbe3SMel Gorman 154304f2cbe3SMel Gorman /* 154404f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW, 154504f2cbe3SMel Gorman * it is due to references held by a child and an insufficient 154604f2cbe3SMel Gorman * huge page pool. To guarantee the original mapper's 154704f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child 154804f2cbe3SMel Gorman * may get SIGKILLed if it later faults. 154904f2cbe3SMel Gorman */ 155004f2cbe3SMel Gorman if (outside_reserve) { 155104f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 155204f2cbe3SMel Gorman if (unmap_ref_private(mm, vma, old_page, address)) { 155304f2cbe3SMel Gorman BUG_ON(page_count(old_page) != 1); 155404f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 155504f2cbe3SMel Gorman goto retry_avoidcopy; 155604f2cbe3SMel Gorman } 155704f2cbe3SMel Gorman WARN_ON_ONCE(1); 155804f2cbe3SMel Gorman } 155904f2cbe3SMel Gorman 15602fc39cecSAdam Litke return -PTR_ERR(new_page); 15611e8f889bSDavid Gibson } 15621e8f889bSDavid Gibson 15631e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 15649de455b2SAtsushi Nemoto copy_huge_page(new_page, old_page, address, vma); 15650ed361deSNick Piggin __SetPageUptodate(new_page); 15661e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 15671e8f889bSDavid Gibson 1568a5516438SAndi Kleen ptep = huge_pte_offset(mm, address & huge_page_mask(h)); 15697f2e9525SGerald Schaefer if (likely(pte_same(huge_ptep_get(ptep), pte))) { 15701e8f889bSDavid Gibson /* Break COW */ 15718fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep); 15721e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, 15731e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1)); 15741e8f889bSDavid Gibson /* Make the old page be freed below */ 15751e8f889bSDavid Gibson new_page = old_page; 15761e8f889bSDavid Gibson } 15771e8f889bSDavid Gibson page_cache_release(new_page); 15781e8f889bSDavid Gibson page_cache_release(old_page); 157983c54070SNick Piggin return 0; 15801e8f889bSDavid Gibson } 15811e8f889bSDavid Gibson 158204f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */ 1583a5516438SAndi Kleen static struct page *hugetlbfs_pagecache_page(struct hstate *h, 1584a5516438SAndi Kleen struct vm_area_struct *vma, unsigned long address) 158504f2cbe3SMel Gorman { 158604f2cbe3SMel Gorman struct address_space *mapping; 1587e7c4b0bfSAndy Whitcroft pgoff_t idx; 158804f2cbe3SMel Gorman 158904f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 1590a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 159104f2cbe3SMel Gorman 159204f2cbe3SMel Gorman return find_lock_page(mapping, idx); 159304f2cbe3SMel Gorman } 159404f2cbe3SMel Gorman 1595a1ed3ddaSRobert P. J.
Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 15961e8f889bSDavid Gibson unsigned long address, pte_t *ptep, int write_access) 1597ac9b9c66SHugh Dickins { 1598a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 1599ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 1600e7c4b0bfSAndy Whitcroft pgoff_t idx; 16014c887265SAdam Litke unsigned long size; 16024c887265SAdam Litke struct page *page; 16034c887265SAdam Litke struct address_space *mapping; 16041e8f889bSDavid Gibson pte_t new_pte; 16054c887265SAdam Litke 160604f2cbe3SMel Gorman /* 160704f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the 160804f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 160904f2cbe3SMel Gorman * COW. Warn that such a situation has occurred as it may not be obvious. 161004f2cbe3SMel Gorman */ 161104f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 161204f2cbe3SMel Gorman printk(KERN_WARNING 161304f2cbe3SMel Gorman "PID %d killed due to inadequate hugepage pool\n", 161404f2cbe3SMel Gorman current->pid); 161504f2cbe3SMel Gorman return ret; 161604f2cbe3SMel Gorman } 161704f2cbe3SMel Gorman 16184c887265SAdam Litke mapping = vma->vm_file->f_mapping; 1619a5516438SAndi Kleen idx = vma_hugecache_offset(h, vma, address); 16204c887265SAdam Litke 16214c887265SAdam Litke /* 16224c887265SAdam Litke * Use page lock to guard against racing truncation 16234c887265SAdam Litke * before we get page_table_lock. 16244c887265SAdam Litke */ 16256bda666aSChristoph Lameter retry: 16266bda666aSChristoph Lameter page = find_lock_page(mapping, idx); 16276bda666aSChristoph Lameter if (!page) { 1628a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 1629ebed4bfcSHugh Dickins if (idx >= size) 1630ebed4bfcSHugh Dickins goto out; 163104f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0); 16322fc39cecSAdam Litke if (IS_ERR(page)) { 16332fc39cecSAdam Litke ret = -PTR_ERR(page); 16346bda666aSChristoph Lameter goto out; 16356bda666aSChristoph Lameter } 1636a5516438SAndi Kleen clear_huge_page(page, address, huge_page_size(h)); 16370ed361deSNick Piggin __SetPageUptodate(page); 1638ac9b9c66SHugh Dickins 16396bda666aSChristoph Lameter if (vma->vm_flags & VM_SHARED) { 16406bda666aSChristoph Lameter int err; 164145c682a6SKen Chen struct inode *inode = mapping->host; 16426bda666aSChristoph Lameter 16436bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 16446bda666aSChristoph Lameter if (err) { 16456bda666aSChristoph Lameter put_page(page); 16466bda666aSChristoph Lameter if (err == -EEXIST) 16476bda666aSChristoph Lameter goto retry; 16486bda666aSChristoph Lameter goto out; 16496bda666aSChristoph Lameter } 165045c682a6SKen Chen 165145c682a6SKen Chen spin_lock(&inode->i_lock); 1652a5516438SAndi Kleen inode->i_blocks += blocks_per_huge_page(h); 165345c682a6SKen Chen spin_unlock(&inode->i_lock); 16546bda666aSChristoph Lameter } else 16556bda666aSChristoph Lameter lock_page(page); 16566bda666aSChristoph Lameter } 16571e8f889bSDavid Gibson 1658ac9b9c66SHugh Dickins spin_lock(&mm->page_table_lock); 1659a5516438SAndi Kleen size = i_size_read(mapping->host) >> huge_page_shift(h); 16604c887265SAdam Litke if (idx >= size) 16614c887265SAdam Litke goto backout; 16624c887265SAdam Litke 166383c54070SNick Piggin ret = 0; 16647f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) 16654c887265SAdam Litke goto backout; 16664c887265SAdam Litke 16671e8f889bSDavid Gibson new_pte =
make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 16681e8f889bSDavid Gibson && (vma->vm_flags & VM_SHARED))); 16691e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte); 16701e8f889bSDavid Gibson 16711e8f889bSDavid Gibson if (write_access && !(vma->vm_flags & VM_SHARED)) { 16721e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */ 167304f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 16741e8f889bSDavid Gibson } 16751e8f889bSDavid Gibson 1676ac9b9c66SHugh Dickins spin_unlock(&mm->page_table_lock); 16774c887265SAdam Litke unlock_page(page); 16784c887265SAdam Litke out: 1679ac9b9c66SHugh Dickins return ret; 16804c887265SAdam Litke 16814c887265SAdam Litke backout: 16824c887265SAdam Litke spin_unlock(&mm->page_table_lock); 16834c887265SAdam Litke unlock_page(page); 16844c887265SAdam Litke put_page(page); 16854c887265SAdam Litke goto out; 1686ac9b9c66SHugh Dickins } 1687ac9b9c66SHugh Dickins 168886e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 168986e5216fSAdam Litke unsigned long address, int write_access) 169086e5216fSAdam Litke { 169186e5216fSAdam Litke pte_t *ptep; 169286e5216fSAdam Litke pte_t entry; 16931e8f889bSDavid Gibson int ret; 16943935baa9SDavid Gibson static DEFINE_MUTEX(hugetlb_instantiation_mutex); 1695a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 169686e5216fSAdam Litke 1697a5516438SAndi Kleen ptep = huge_pte_alloc(mm, address, huge_page_size(h)); 169886e5216fSAdam Litke if (!ptep) 169986e5216fSAdam Litke return VM_FAULT_OOM; 170086e5216fSAdam Litke 17013935baa9SDavid Gibson /* 17023935baa9SDavid Gibson * Serialize hugepage allocation and instantiation, so that we don't 17033935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate 17043935baa9SDavid Gibson * the same page in the page cache. 
17053935baa9SDavid Gibson */ 17063935baa9SDavid Gibson mutex_lock(&hugetlb_instantiation_mutex); 17077f2e9525SGerald Schaefer entry = huge_ptep_get(ptep); 17087f2e9525SGerald Schaefer if (huge_pte_none(entry)) { 17093935baa9SDavid Gibson ret = hugetlb_no_page(mm, vma, address, ptep, write_access); 17103935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 17113935baa9SDavid Gibson return ret; 17123935baa9SDavid Gibson } 171386e5216fSAdam Litke 171483c54070SNick Piggin ret = 0; 17151e8f889bSDavid Gibson 17161e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 17171e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */ 17187f2e9525SGerald Schaefer if (likely(pte_same(entry, huge_ptep_get(ptep)))) 171904f2cbe3SMel Gorman if (write_access && !pte_write(entry)) { 172004f2cbe3SMel Gorman struct page *page; 1721a5516438SAndi Kleen page = hugetlbfs_pagecache_page(h, vma, address); 172204f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, entry, page); 172304f2cbe3SMel Gorman if (page) { 172404f2cbe3SMel Gorman unlock_page(page); 172504f2cbe3SMel Gorman put_page(page); 172604f2cbe3SMel Gorman } 172704f2cbe3SMel Gorman } 17281e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 17293935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 17301e8f889bSDavid Gibson 17311e8f889bSDavid Gibson return ret; 173286e5216fSAdam Litke } 173386e5216fSAdam Litke 173463551ae0SDavid Gibson int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 173563551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas, 17365b23dbe8SAdam Litke unsigned long *position, int *length, int i, 17375b23dbe8SAdam Litke int write) 173863551ae0SDavid Gibson { 1739d5d4b0aaSChen, Kenneth W unsigned long pfn_offset; 1740d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position; 174163551ae0SDavid Gibson int remainder = *length; 1742a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 174363551ae0SDavid Gibson 17441c59827dSHugh Dickins spin_lock(&mm->page_table_lock); 174563551ae0SDavid Gibson while (vaddr < vma->vm_end && remainder) { 174663551ae0SDavid Gibson pte_t *pte; 174763551ae0SDavid Gibson struct page *page; 174863551ae0SDavid Gibson 17494c887265SAdam Litke /* 17504c887265SAdam Litke * Some archs (sparc64, sh*) have multiple pte_ts to 17514c887265SAdam Litke * each hugepage. We have to make sure we get the 17524c887265SAdam Litke * first, for the page indexing below to work.
17534c887265SAdam Litke */ 1754a5516438SAndi Kleen pte = huge_pte_offset(mm, vaddr & huge_page_mask(h)); 175563551ae0SDavid Gibson 17567f2e9525SGerald Schaefer if (!pte || huge_pte_none(huge_ptep_get(pte)) || 17577f2e9525SGerald Schaefer (write && !pte_write(huge_ptep_get(pte)))) { 17584c887265SAdam Litke int ret; 17594c887265SAdam Litke 17604c887265SAdam Litke spin_unlock(&mm->page_table_lock); 17615b23dbe8SAdam Litke ret = hugetlb_fault(mm, vma, vaddr, write); 17624c887265SAdam Litke spin_lock(&mm->page_table_lock); 1763a89182c7SAdam Litke if (!(ret & VM_FAULT_ERROR)) 17644c887265SAdam Litke continue; 17654c887265SAdam Litke 17661c59827dSHugh Dickins remainder = 0; 17671c59827dSHugh Dickins if (!i) 17681c59827dSHugh Dickins i = -EFAULT; 17691c59827dSHugh Dickins break; 17701c59827dSHugh Dickins } 177163551ae0SDavid Gibson 1772a5516438SAndi Kleen pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 17737f2e9525SGerald Schaefer page = pte_page(huge_ptep_get(pte)); 1774d5d4b0aaSChen, Kenneth W same_page: 1775d6692183SChen, Kenneth W if (pages) { 177663551ae0SDavid Gibson get_page(page); 1777d5d4b0aaSChen, Kenneth W pages[i] = page + pfn_offset; 1778d6692183SChen, Kenneth W } 177963551ae0SDavid Gibson 178063551ae0SDavid Gibson if (vmas) 178163551ae0SDavid Gibson vmas[i] = vma; 178263551ae0SDavid Gibson 178363551ae0SDavid Gibson vaddr += PAGE_SIZE; 1784d5d4b0aaSChen, Kenneth W ++pfn_offset; 178563551ae0SDavid Gibson --remainder; 178663551ae0SDavid Gibson ++i; 1787d5d4b0aaSChen, Kenneth W if (vaddr < vma->vm_end && remainder && 1788a5516438SAndi Kleen pfn_offset < pages_per_huge_page(h)) { 1789d5d4b0aaSChen, Kenneth W /* 1790d5d4b0aaSChen, Kenneth W * We use pfn_offset to avoid touching the pageframes 1791d5d4b0aaSChen, Kenneth W * of this compound page. 
1792d5d4b0aaSChen, Kenneth W */ 1793d5d4b0aaSChen, Kenneth W goto same_page; 1794d5d4b0aaSChen, Kenneth W } 179563551ae0SDavid Gibson } 17961c59827dSHugh Dickins spin_unlock(&mm->page_table_lock); 179763551ae0SDavid Gibson *length = remainder; 179863551ae0SDavid Gibson *position = vaddr; 179963551ae0SDavid Gibson 180063551ae0SDavid Gibson return i; 180163551ae0SDavid Gibson } 18028f860591SZhang, Yanmin 18038f860591SZhang, Yanmin void hugetlb_change_protection(struct vm_area_struct *vma, 18048f860591SZhang, Yanmin unsigned long address, unsigned long end, pgprot_t newprot) 18058f860591SZhang, Yanmin { 18068f860591SZhang, Yanmin struct mm_struct *mm = vma->vm_mm; 18078f860591SZhang, Yanmin unsigned long start = address; 18088f860591SZhang, Yanmin pte_t *ptep; 18098f860591SZhang, Yanmin pte_t pte; 1810a5516438SAndi Kleen struct hstate *h = hstate_vma(vma); 18118f860591SZhang, Yanmin 18128f860591SZhang, Yanmin BUG_ON(address >= end); 18138f860591SZhang, Yanmin flush_cache_range(vma, address, end); 18148f860591SZhang, Yanmin 181539dde65cSChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 18168f860591SZhang, Yanmin spin_lock(&mm->page_table_lock); 1817a5516438SAndi Kleen for (; address < end; address += huge_page_size(h)) { 18188f860591SZhang, Yanmin ptep = huge_pte_offset(mm, address); 18198f860591SZhang, Yanmin if (!ptep) 18208f860591SZhang, Yanmin continue; 182139dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 182239dde65cSChen, Kenneth W continue; 18237f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) { 18248f860591SZhang, Yanmin pte = huge_ptep_get_and_clear(mm, address, ptep); 18258f860591SZhang, Yanmin pte = pte_mkhuge(pte_modify(pte, newprot)); 18268f860591SZhang, Yanmin set_huge_pte_at(mm, address, ptep, pte); 18278f860591SZhang, Yanmin } 18288f860591SZhang, Yanmin } 18298f860591SZhang, Yanmin spin_unlock(&mm->page_table_lock); 183039dde65cSChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 18318f860591SZhang, Yanmin 18328f860591SZhang, Yanmin flush_tlb_range(vma, start, end); 18338f860591SZhang, Yanmin } 18348f860591SZhang, Yanmin 1835a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode, 1836a1e78772SMel Gorman long from, long to, 1837a1e78772SMel Gorman struct vm_area_struct *vma) 1838e4e574b7SAdam Litke { 1839e4e574b7SAdam Litke long ret, chg; 1840a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 1841e4e574b7SAdam Litke 1842c37f9fb1SAndy Whitcroft if (vma && vma->vm_flags & VM_NORESERVE) 1843c37f9fb1SAndy Whitcroft return 0; 1844c37f9fb1SAndy Whitcroft 1845a1e78772SMel Gorman /* 1846a1e78772SMel Gorman * Shared mappings base their reservation on the number of pages that 1847a1e78772SMel Gorman * are already allocated on behalf of the file. Private mappings need 1848a1e78772SMel Gorman * to reserve the full area even if read-only as mprotect() may be 1849a1e78772SMel Gorman * called to make the mapping read-write. 
Assume !vma is a shm mapping 1850a1e78772SMel Gorman */ 1851a1e78772SMel Gorman if (!vma || vma->vm_flags & VM_SHARED) 1852e4e574b7SAdam Litke chg = region_chg(&inode->i_mapping->private_list, from, to); 1853a1e78772SMel Gorman else { 185484afd99bSAndy Whitcroft struct resv_map *resv_map = resv_map_alloc(); 185584afd99bSAndy Whitcroft if (!resv_map) 185684afd99bSAndy Whitcroft return -ENOMEM; 185784afd99bSAndy Whitcroft 1858a1e78772SMel Gorman chg = to - from; 185984afd99bSAndy Whitcroft 186084afd99bSAndy Whitcroft set_vma_resv_map(vma, resv_map); 186104f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 1862a1e78772SMel Gorman } 1863a1e78772SMel Gorman 1864e4e574b7SAdam Litke if (chg < 0) 1865e4e574b7SAdam Litke return chg; 18668a630112SKen Chen 186790d8b7e6SAdam Litke if (hugetlb_get_quota(inode->i_mapping, chg)) 186890d8b7e6SAdam Litke return -ENOSPC; 1869a5516438SAndi Kleen ret = hugetlb_acct_memory(h, chg); 187068842c9bSKen Chen if (ret < 0) { 187168842c9bSKen Chen hugetlb_put_quota(inode->i_mapping, chg); 1872a43a8c39SChen, Kenneth W return ret; 187368842c9bSKen Chen } 1874a1e78772SMel Gorman if (!vma || vma->vm_flags & VM_SHARED) 1875a43a8c39SChen, Kenneth W region_add(&inode->i_mapping->private_list, from, to); 1876a43a8c39SChen, Kenneth W return 0; 1877a43a8c39SChen, Kenneth W } 1878a43a8c39SChen, Kenneth W 1879a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 1880a43a8c39SChen, Kenneth W { 1881a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 1882a43a8c39SChen, Kenneth W long chg = region_truncate(&inode->i_mapping->private_list, offset); 188345c682a6SKen Chen 188445c682a6SKen Chen spin_lock(&inode->i_lock); 1885a5516438SAndi Kleen inode->i_blocks -= blocks_per_huge_page(h); 188645c682a6SKen Chen spin_unlock(&inode->i_lock); 188745c682a6SKen Chen 188890d8b7e6SAdam Litke hugetlb_put_quota(inode->i_mapping, (chg - freed)); 1889a5516438SAndi Kleen hugetlb_acct_memory(h, -(chg - freed)); 1890a43a8c39SChen, Kenneth W } 1891
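The pool-sizing and reporting code in this listing is driven from procfs: writes to /proc/sys/vm/nr_hugepages reach hugetlb_sysctl_handler() and set_max_huge_pages(), writes to /proc/sys/vm/nr_overcommit_hugepages reach hugetlb_overcommit_handler(), and hugetlb_report_meminfo() supplies the HugePages_* lines of /proc/meminfo. The fragment below is a minimal userspace sketch of that interface, not part of the kernel source above; the pool size of 16 and overcommit limit of 4 are arbitrary example values, and the writes require root.

/*
 * Userspace sketch: resize the default huge page pool and read back the
 * counters exported by hugetlb_report_meminfo().  Illustrative only; the
 * requested sizes are arbitrary example values.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void write_ul(const char *path, unsigned long val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fprintf(f, "%lu\n", val);
	fclose(f);
}

int main(void)
{
	char line[128];
	FILE *meminfo;

	/* Handled by hugetlb_sysctl_handler() -> set_max_huge_pages(). */
	write_ul("/proc/sys/vm/nr_hugepages", 16);
	/* Handled by hugetlb_overcommit_handler(). */
	write_ul("/proc/sys/vm/nr_overcommit_hugepages", 4);

	/* hugetlb_report_meminfo() contributes these lines to /proc/meminfo. */
	meminfo = fopen("/proc/meminfo", "r");
	if (!meminfo) {
		perror("/proc/meminfo");
		exit(1);
	}
	while (fgets(line, sizeof(line), meminfo))
		if (!strncmp(line, "HugePages_", 10) ||
		    !strncmp(line, "Hugepagesize", 12))
			fputs(line, stdout);
	fclose(meminfo);
	return 0;
}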
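hugetlb_report_node_meminfo() formats the same counters per NUMA node; its output normally appears in the per-node meminfo file under sysfs. A small sketch follows, assuming node 0 exists and that the file sits at the conventional /sys/devices/system/node/node0/meminfo path (path is an assumption, adjust for the local system).

/*
 * Dump the per-node hugepage counters produced by
 * hugetlb_report_node_meminfo().  The sysfs path is assumed to be the
 * conventional node0 location.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/node/node0/meminfo", "r");
	char line[256];

	if (!f) {
		perror("node0 meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "HugePages_"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}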
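hugetlb_reserve_pages() reserves huge pages at mmap() time: shared mappings charge only what region_chg() reports as not already backed in the file, while private mappings charge the whole range and mark the VMA HPAGE_RESV_OWNER. hugetlb_cow(), unmap_ref_private() and hugetlb_no_page() then implement the policy that a failed MAP_PRIVATE COW is resolved in favour of the original mapper, with any child that later touches the stripped page being killed. The sketch below exercises that path from userspace under stated assumptions: the hugetlbfs mount point /mnt/huge is a hypothetical path and a 2 MB default huge page size is assumed.

/*
 * Userspace sketch of the MAP_PRIVATE reservation/COW behaviour described
 * above.  The hugetlbfs mount point /mnt/huge is an assumed path, and the
 * 2MB default huge page size is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define HPAGE_SZ	(2UL * 1024 * 1024)	/* assumed default size */

int main(void)
{
	int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ftruncate(fd, HPAGE_SZ) < 0) {
		perror("ftruncate");
		return 1;
	}

	/*
	 * MAP_PRIVATE: hugetlb_reserve_pages() charges the whole range and
	 * marks this VMA HPAGE_RESV_OWNER.
	 */
	p = mmap(NULL, HPAGE_SZ, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 1;	/* parent instantiates its reserved page */

	if (fork() == 0) {
		/*
		 * The child inherits the page read-only.  If the parent's
		 * COW below cannot allocate a fresh page, unmap_ref_private()
		 * strips the page from this VMA, and the read here is then
		 * answered by hugetlb_no_page() with a SIGKILL and the
		 * "inadequate hugepage pool" warning.
		 */
		sleep(1);
		_exit(p[0]);
	}

	p[0] = 2;	/* parent write: hugetlb_cow() with outside_reserve set */
	wait(NULL);
	munmap(p, HPAGE_SZ);
	close(fd);
	unlink("/mnt/huge/demo");
	return 0;
}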