/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
 * must either hold the mmap_sem for write, or the mmap_sem for read and
 * the hugetlb_instantiation_mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		int seg_from;
		int seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct vm_area_struct *vma,
					unsigned long address)
{
	return ((address - vma->vm_start) >> HPAGE_SHIFT) +
			(vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap() which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * a reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_SHARED) {
		/* Shared mappings always use reserves */
		resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_private_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return 0;
	if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 0;
	return 1;
}

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(void)
{
	int nid;
	struct page *page = NULL;

	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}

static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	struct zone *zone;
	struct zoneref *z;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_private_reserves(vma) &&
			free_huge_pages - resv_huge_pages == 0)
		return NULL;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && free_huge_pages - resv_huge_pages == 0)
		return NULL;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;

			if (!avoid_reserve)
				decrement_hugepage_resv_vma(vma);

			break;
		}
	}
	mpol_cond_put(mpol);
	return page;
}

static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

static void prep_new_huge_page(struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	nr_huge_pages++;
	nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, HUGETLB_PAGE_ORDER);
			return NULL;
		}
		prep_new_huge_page(page, nid);
	}

	return page;
}

static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A.  B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus().  A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again).  Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page.  This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use.  It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
					__GFP_REPEAT|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0) {
		resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes. Iterate across all nodes until we
	 * can no longer free unreserved surplus pages. This occurs when
	 * the nodes with surplus pages have no free pages.
	 */
	unsigned long remaining_iterations = num_online_nodes();

	/* Uncommit the reservation */
	resv_huge_pages -= unused_resv_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (remaining_iterations-- && nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
			remaining_iterations = num_online_nodes();
		}
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * the reservation and actually increase quota before an allocation can occur.
 * Where any new reservation would be required the reservation change is
 * prepared, but not committed.  Once the page has been quota'd, allocated
 * and instantiated, the change should be committed via vma_commit_reservation.
 * No action is required on failure.
 */
static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		int err;
		pgoff_t idx = vma_hugecache_offset(vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}

static void vma_commit_reservation(struct vm_area_struct *vma,
							unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	long chg;	/* signed: vma_needs_reservation() may return an error */

	/*
	 * Processes that did not create the mapping will have no reserves and
	 * will not have accounted against quota. Check that the quota can be
	 * made before satisfying the allocation.
	 * MAP_NORESERVE mappings may also need pages and quota allocated
	 * if no reserve mapping overlaps.
	 */
	chg = vma_needs_reservation(vma, addr);
	if (chg < 0)
		return ERR_PTR(chg);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-ENOSPC);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}

	set_page_refcounted(page);
	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(vma, addr);

	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here.  Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls is changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page();
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	spin_lock(&hugetlb_lock);
	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
	spin_unlock(&hugetlb_lock);
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid],
		nid, surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpusets are configured, they break the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpusets
	 * because the reservation is not checked against page availability
	 * for the current cpuset. An application can still potentially be
	 * OOM'ed by the kernel for lack of free hugetlb pages in the cpuset
	 * that the task is in.
	 * Attempting to enforce strict accounting with cpusets is almost
	 * impossible (or too ugly) because cpusets are so fluid that a
	 * task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mappings with cpusets is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to checking against current free page availability as
	 * a best attempt and hopefully minimize the impact of changing
	 * semantics that cpusets have.
	 */
1106fc1b8a73SMel Gorman */ 1107fc1b8a73SMel Gorman if (delta > 0) { 1108fc1b8a73SMel Gorman if (gather_surplus_pages(delta) < 0) 1109fc1b8a73SMel Gorman goto out; 1110fc1b8a73SMel Gorman 1111fc1b8a73SMel Gorman if (delta > cpuset_mems_nr(free_huge_pages_node)) { 1112fc1b8a73SMel Gorman return_unused_surplus_pages(delta); 1113fc1b8a73SMel Gorman goto out; 1114fc1b8a73SMel Gorman } 1115fc1b8a73SMel Gorman } 1116fc1b8a73SMel Gorman 1117fc1b8a73SMel Gorman ret = 0; 1118fc1b8a73SMel Gorman if (delta < 0) 1119fc1b8a73SMel Gorman return_unused_surplus_pages((unsigned long) -delta); 1120fc1b8a73SMel Gorman 1121fc1b8a73SMel Gorman out: 1122fc1b8a73SMel Gorman spin_unlock(&hugetlb_lock); 1123fc1b8a73SMel Gorman return ret; 1124fc1b8a73SMel Gorman } 1125fc1b8a73SMel Gorman 112684afd99bSAndy Whitcroft static void hugetlb_vm_op_open(struct vm_area_struct *vma) 112784afd99bSAndy Whitcroft { 112884afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 112984afd99bSAndy Whitcroft 113084afd99bSAndy Whitcroft /* 113184afd99bSAndy Whitcroft * This new VMA should share its siblings reservation map if present. 113284afd99bSAndy Whitcroft * The VMA will only ever have a valid reservation map pointer where 113384afd99bSAndy Whitcroft * it is being copied for another still existing VMA. As that VMA 113484afd99bSAndy Whitcroft * has a reference to the reservation map it cannot dissappear until 113584afd99bSAndy Whitcroft * after this open call completes. It is therefore safe to take a 113684afd99bSAndy Whitcroft * new reference here without additional locking. 113784afd99bSAndy Whitcroft */ 113884afd99bSAndy Whitcroft if (reservations) 113984afd99bSAndy Whitcroft kref_get(&reservations->refs); 114084afd99bSAndy Whitcroft } 114184afd99bSAndy Whitcroft 1142a1e78772SMel Gorman static void hugetlb_vm_op_close(struct vm_area_struct *vma) 1143a1e78772SMel Gorman { 114484afd99bSAndy Whitcroft struct resv_map *reservations = vma_resv_map(vma); 114584afd99bSAndy Whitcroft unsigned long reserve; 114684afd99bSAndy Whitcroft unsigned long start; 114784afd99bSAndy Whitcroft unsigned long end; 114884afd99bSAndy Whitcroft 114984afd99bSAndy Whitcroft if (reservations) { 1150a858f7b2SJohannes Weiner start = vma_hugecache_offset(vma, vma->vm_start); 1151a858f7b2SJohannes Weiner end = vma_hugecache_offset(vma, vma->vm_end); 115284afd99bSAndy Whitcroft 115384afd99bSAndy Whitcroft reserve = (end - start) - 115484afd99bSAndy Whitcroft region_count(&reservations->regions, start, end); 115584afd99bSAndy Whitcroft 115684afd99bSAndy Whitcroft kref_put(&reservations->refs, resv_map_release); 115784afd99bSAndy Whitcroft 1158a1e78772SMel Gorman if (reserve) 1159a1e78772SMel Gorman hugetlb_acct_memory(-reserve); 1160a1e78772SMel Gorman } 116184afd99bSAndy Whitcroft } 1162a1e78772SMel Gorman 11631da177e4SLinus Torvalds /* 11641da177e4SLinus Torvalds * We cannot handle pagefaults against hugetlb pages at all. They cause 11651da177e4SLinus Torvalds * handle_mm_fault() to try to instantiate regular-sized pages in the 11661da177e4SLinus Torvalds * hugegpage VMA. do_page_fault() is supposed to trap this, so BUG is we get 11671da177e4SLinus Torvalds * this far. 
11681da177e4SLinus Torvalds */ 1169d0217ac0SNick Piggin static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 11701da177e4SLinus Torvalds { 11711da177e4SLinus Torvalds BUG(); 1172d0217ac0SNick Piggin return 0; 11731da177e4SLinus Torvalds } 11741da177e4SLinus Torvalds 11751da177e4SLinus Torvalds struct vm_operations_struct hugetlb_vm_ops = { 1176d0217ac0SNick Piggin .fault = hugetlb_vm_op_fault, 117784afd99bSAndy Whitcroft .open = hugetlb_vm_op_open, 1178a1e78772SMel Gorman .close = hugetlb_vm_op_close, 11791da177e4SLinus Torvalds }; 11801da177e4SLinus Torvalds 11811e8f889bSDavid Gibson static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 11821e8f889bSDavid Gibson int writable) 118363551ae0SDavid Gibson { 118463551ae0SDavid Gibson pte_t entry; 118563551ae0SDavid Gibson 11861e8f889bSDavid Gibson if (writable) { 118763551ae0SDavid Gibson entry = 118863551ae0SDavid Gibson pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); 118963551ae0SDavid Gibson } else { 11907f2e9525SGerald Schaefer entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot)); 119163551ae0SDavid Gibson } 119263551ae0SDavid Gibson entry = pte_mkyoung(entry); 119363551ae0SDavid Gibson entry = pte_mkhuge(entry); 119463551ae0SDavid Gibson 119563551ae0SDavid Gibson return entry; 119663551ae0SDavid Gibson } 119763551ae0SDavid Gibson 11981e8f889bSDavid Gibson static void set_huge_ptep_writable(struct vm_area_struct *vma, 11991e8f889bSDavid Gibson unsigned long address, pte_t *ptep) 12001e8f889bSDavid Gibson { 12011e8f889bSDavid Gibson pte_t entry; 12021e8f889bSDavid Gibson 12037f2e9525SGerald Schaefer entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); 12047f2e9525SGerald Schaefer if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) { 12051e8f889bSDavid Gibson update_mmu_cache(vma, address, entry); 12061e8f889bSDavid Gibson } 12078dab5241SBenjamin Herrenschmidt } 12081e8f889bSDavid Gibson 12091e8f889bSDavid Gibson 121063551ae0SDavid Gibson int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 121163551ae0SDavid Gibson struct vm_area_struct *vma) 121263551ae0SDavid Gibson { 121363551ae0SDavid Gibson pte_t *src_pte, *dst_pte, entry; 121463551ae0SDavid Gibson struct page *ptepage; 12151c59827dSHugh Dickins unsigned long addr; 12161e8f889bSDavid Gibson int cow; 12171e8f889bSDavid Gibson 12181e8f889bSDavid Gibson cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 121963551ae0SDavid Gibson 12201c59827dSHugh Dickins for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { 1221c74df32cSHugh Dickins src_pte = huge_pte_offset(src, addr); 1222c74df32cSHugh Dickins if (!src_pte) 1223c74df32cSHugh Dickins continue; 122463551ae0SDavid Gibson dst_pte = huge_pte_alloc(dst, addr); 122563551ae0SDavid Gibson if (!dst_pte) 122663551ae0SDavid Gibson goto nomem; 1227c5c99429SLarry Woodman 1228c5c99429SLarry Woodman /* If the pagetables are shared don't copy or take references */ 1229c5c99429SLarry Woodman if (dst_pte == src_pte) 1230c5c99429SLarry Woodman continue; 1231c5c99429SLarry Woodman 1232c74df32cSHugh Dickins spin_lock(&dst->page_table_lock); 123346478758SNick Piggin spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); 12347f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(src_pte))) { 12351e8f889bSDavid Gibson if (cow) 12367f2e9525SGerald Schaefer huge_ptep_set_wrprotect(src, addr, src_pte); 12377f2e9525SGerald Schaefer entry = huge_ptep_get(src_pte); 123863551ae0SDavid Gibson ptepage = pte_page(entry); 
123963551ae0SDavid Gibson get_page(ptepage); 124063551ae0SDavid Gibson set_huge_pte_at(dst, addr, dst_pte, entry); 12411c59827dSHugh Dickins } 12421c59827dSHugh Dickins spin_unlock(&src->page_table_lock); 1243c74df32cSHugh Dickins spin_unlock(&dst->page_table_lock); 124463551ae0SDavid Gibson } 124563551ae0SDavid Gibson return 0; 124663551ae0SDavid Gibson 124763551ae0SDavid Gibson nomem: 124863551ae0SDavid Gibson return -ENOMEM; 124963551ae0SDavid Gibson } 125063551ae0SDavid Gibson 1251502717f4SChen, Kenneth W void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 125204f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 125363551ae0SDavid Gibson { 125463551ae0SDavid Gibson struct mm_struct *mm = vma->vm_mm; 125563551ae0SDavid Gibson unsigned long address; 1256c7546f8fSDavid Gibson pte_t *ptep; 125763551ae0SDavid Gibson pte_t pte; 125863551ae0SDavid Gibson struct page *page; 1259fe1668aeSChen, Kenneth W struct page *tmp; 1260c0a499c2SChen, Kenneth W /* 1261c0a499c2SChen, Kenneth W * A page gathering list, protected by per file i_mmap_lock. The 1262c0a499c2SChen, Kenneth W * lock is used to avoid list corruption from multiple unmapping 1263c0a499c2SChen, Kenneth W * of the same page since we are using page->lru. 1264c0a499c2SChen, Kenneth W */ 1265fe1668aeSChen, Kenneth W LIST_HEAD(page_list); 126663551ae0SDavid Gibson 126763551ae0SDavid Gibson WARN_ON(!is_vm_hugetlb_page(vma)); 126863551ae0SDavid Gibson BUG_ON(start & ~HPAGE_MASK); 126963551ae0SDavid Gibson BUG_ON(end & ~HPAGE_MASK); 127063551ae0SDavid Gibson 1271508034a3SHugh Dickins spin_lock(&mm->page_table_lock); 127263551ae0SDavid Gibson for (address = start; address < end; address += HPAGE_SIZE) { 1273c7546f8fSDavid Gibson ptep = huge_pte_offset(mm, address); 1274c7546f8fSDavid Gibson if (!ptep) 1275c7546f8fSDavid Gibson continue; 1276c7546f8fSDavid Gibson 127739dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 127839dde65cSChen, Kenneth W continue; 127939dde65cSChen, Kenneth W 128004f2cbe3SMel Gorman /* 128104f2cbe3SMel Gorman * If a reference page is supplied, it is because a specific 128204f2cbe3SMel Gorman * page is being unmapped, not a range. Ensure the page we 128304f2cbe3SMel Gorman * are about to unmap is the actual page of interest. 
128404f2cbe3SMel Gorman */ 128504f2cbe3SMel Gorman if (ref_page) { 128604f2cbe3SMel Gorman pte = huge_ptep_get(ptep); 128704f2cbe3SMel Gorman if (huge_pte_none(pte)) 128804f2cbe3SMel Gorman continue; 128904f2cbe3SMel Gorman page = pte_page(pte); 129004f2cbe3SMel Gorman if (page != ref_page) 129104f2cbe3SMel Gorman continue; 129204f2cbe3SMel Gorman 129304f2cbe3SMel Gorman /* 129404f2cbe3SMel Gorman * Mark the VMA as having unmapped its page so that 129504f2cbe3SMel Gorman * future faults in this VMA will fail rather than 129604f2cbe3SMel Gorman * looking like data was lost 129704f2cbe3SMel Gorman */ 129804f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 129904f2cbe3SMel Gorman } 130004f2cbe3SMel Gorman 1301c7546f8fSDavid Gibson pte = huge_ptep_get_and_clear(mm, address, ptep); 13027f2e9525SGerald Schaefer if (huge_pte_none(pte)) 130363551ae0SDavid Gibson continue; 1304c7546f8fSDavid Gibson 130563551ae0SDavid Gibson page = pte_page(pte); 13066649a386SKen Chen if (pte_dirty(pte)) 13076649a386SKen Chen set_page_dirty(page); 1308fe1668aeSChen, Kenneth W list_add(&page->lru, &page_list); 130963551ae0SDavid Gibson } 13101da177e4SLinus Torvalds spin_unlock(&mm->page_table_lock); 1311508034a3SHugh Dickins flush_tlb_range(vma, start, end); 1312fe1668aeSChen, Kenneth W list_for_each_entry_safe(page, tmp, &page_list, lru) { 1313fe1668aeSChen, Kenneth W list_del(&page->lru); 1314fe1668aeSChen, Kenneth W put_page(page); 1315fe1668aeSChen, Kenneth W } 13161da177e4SLinus Torvalds } 131763551ae0SDavid Gibson 1318502717f4SChen, Kenneth W void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 131904f2cbe3SMel Gorman unsigned long end, struct page *ref_page) 1320502717f4SChen, Kenneth W { 1321502717f4SChen, Kenneth W /* 1322502717f4SChen, Kenneth W * It is undesirable to test vma->vm_file as it should be non-null 1323502717f4SChen, Kenneth W * for valid hugetlb area. However, vm_file will be NULL in the error 1324502717f4SChen, Kenneth W * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails, 1325502717f4SChen, Kenneth W * do_mmap_pgoff() nullifies vma->vm_file before calling this function 1326502717f4SChen, Kenneth W * to clean up. Since no pte has actually been setup, it is safe to 1327502717f4SChen, Kenneth W * do nothing in this case. 1328502717f4SChen, Kenneth W */ 1329502717f4SChen, Kenneth W if (vma->vm_file) { 1330502717f4SChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 133104f2cbe3SMel Gorman __unmap_hugepage_range(vma, start, end, ref_page); 1332502717f4SChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 1333502717f4SChen, Kenneth W } 1334502717f4SChen, Kenneth W } 1335502717f4SChen, Kenneth W 133604f2cbe3SMel Gorman /* 133704f2cbe3SMel Gorman * This is called when the original mapper is failing to COW a MAP_PRIVATE 133804f2cbe3SMel Gorman * mappping it owns the reserve page for. The intention is to unmap the page 133904f2cbe3SMel Gorman * from other VMAs and let the children be SIGKILLed if they are faulting the 134004f2cbe3SMel Gorman * same region. 
134104f2cbe3SMel Gorman */ 134204f2cbe3SMel Gorman int unmap_ref_private(struct mm_struct *mm, 134304f2cbe3SMel Gorman struct vm_area_struct *vma, 134404f2cbe3SMel Gorman struct page *page, 134504f2cbe3SMel Gorman unsigned long address) 134604f2cbe3SMel Gorman { 134704f2cbe3SMel Gorman struct vm_area_struct *iter_vma; 134804f2cbe3SMel Gorman struct address_space *mapping; 134904f2cbe3SMel Gorman struct prio_tree_iter iter; 135004f2cbe3SMel Gorman pgoff_t pgoff; 135104f2cbe3SMel Gorman 135204f2cbe3SMel Gorman /* 135304f2cbe3SMel Gorman * vm_pgoff is in PAGE_SIZE units, hence the different calculation 135404f2cbe3SMel Gorman * from page cache lookup which is in HPAGE_SIZE units. 135504f2cbe3SMel Gorman */ 135604f2cbe3SMel Gorman address = address & huge_page_mask(hstate_vma(vma)); 135704f2cbe3SMel Gorman pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) 135804f2cbe3SMel Gorman + (vma->vm_pgoff >> PAGE_SHIFT); 135904f2cbe3SMel Gorman mapping = (struct address_space *)page_private(page); 136004f2cbe3SMel Gorman 136104f2cbe3SMel Gorman vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 136204f2cbe3SMel Gorman /* Do not unmap the current VMA */ 136304f2cbe3SMel Gorman if (iter_vma == vma) 136404f2cbe3SMel Gorman continue; 136504f2cbe3SMel Gorman 136604f2cbe3SMel Gorman /* 136704f2cbe3SMel Gorman * Unmap the page from other VMAs without their own reserves. 136804f2cbe3SMel Gorman * They get marked to be SIGKILLed if they fault in these 136904f2cbe3SMel Gorman * areas. This is because a future no-page fault on this VMA 137004f2cbe3SMel Gorman * could insert a zeroed page instead of the data existing 137104f2cbe3SMel Gorman * from the time of fork. This would look like data corruption 137204f2cbe3SMel Gorman */ 137304f2cbe3SMel Gorman if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 137404f2cbe3SMel Gorman unmap_hugepage_range(iter_vma, 137504f2cbe3SMel Gorman address, address + HPAGE_SIZE, 137604f2cbe3SMel Gorman page); 137704f2cbe3SMel Gorman } 137804f2cbe3SMel Gorman 137904f2cbe3SMel Gorman return 1; 138004f2cbe3SMel Gorman } 138104f2cbe3SMel Gorman 13821e8f889bSDavid Gibson static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 138304f2cbe3SMel Gorman unsigned long address, pte_t *ptep, pte_t pte, 138404f2cbe3SMel Gorman struct page *pagecache_page) 13851e8f889bSDavid Gibson { 13861e8f889bSDavid Gibson struct page *old_page, *new_page; 138779ac6ba4SDavid Gibson int avoidcopy; 138804f2cbe3SMel Gorman int outside_reserve = 0; 13891e8f889bSDavid Gibson 13901e8f889bSDavid Gibson old_page = pte_page(pte); 13911e8f889bSDavid Gibson 139204f2cbe3SMel Gorman retry_avoidcopy: 13931e8f889bSDavid Gibson /* If no-one else is actually using this page, avoid the copy 13941e8f889bSDavid Gibson * and just make the page writable */ 13951e8f889bSDavid Gibson avoidcopy = (page_count(old_page) == 1); 13961e8f889bSDavid Gibson if (avoidcopy) { 13971e8f889bSDavid Gibson set_huge_ptep_writable(vma, address, ptep); 139883c54070SNick Piggin return 0; 13991e8f889bSDavid Gibson } 14001e8f889bSDavid Gibson 140104f2cbe3SMel Gorman /* 140204f2cbe3SMel Gorman * If the process that created a MAP_PRIVATE mapping is about to 140304f2cbe3SMel Gorman * perform a COW due to a shared page count, attempt to satisfy 140404f2cbe3SMel Gorman * the allocation without using the existing reserves. The pagecache 140504f2cbe3SMel Gorman * page is used to determine if the reserve at this address was 140604f2cbe3SMel Gorman * consumed or not. 
If reserves were used, a partial faulted mapping 140704f2cbe3SMel Gorman * at the time of fork() could consume its reserves on COW instead 140804f2cbe3SMel Gorman * of the full address range. 140904f2cbe3SMel Gorman */ 141004f2cbe3SMel Gorman if (!(vma->vm_flags & VM_SHARED) && 141104f2cbe3SMel Gorman is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 141204f2cbe3SMel Gorman old_page != pagecache_page) 141304f2cbe3SMel Gorman outside_reserve = 1; 141404f2cbe3SMel Gorman 14151e8f889bSDavid Gibson page_cache_get(old_page); 141604f2cbe3SMel Gorman new_page = alloc_huge_page(vma, address, outside_reserve); 14171e8f889bSDavid Gibson 14182fc39cecSAdam Litke if (IS_ERR(new_page)) { 14191e8f889bSDavid Gibson page_cache_release(old_page); 142004f2cbe3SMel Gorman 142104f2cbe3SMel Gorman /* 142204f2cbe3SMel Gorman * If a process owning a MAP_PRIVATE mapping fails to COW, 142304f2cbe3SMel Gorman * it is due to references held by a child and an insufficient 142404f2cbe3SMel Gorman * huge page pool. To guarantee the original mappers 142504f2cbe3SMel Gorman * reliability, unmap the page from child processes. The child 142604f2cbe3SMel Gorman * may get SIGKILLed if it later faults. 142704f2cbe3SMel Gorman */ 142804f2cbe3SMel Gorman if (outside_reserve) { 142904f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 143004f2cbe3SMel Gorman if (unmap_ref_private(mm, vma, old_page, address)) { 143104f2cbe3SMel Gorman BUG_ON(page_count(old_page) != 1); 143204f2cbe3SMel Gorman BUG_ON(huge_pte_none(pte)); 143304f2cbe3SMel Gorman goto retry_avoidcopy; 143404f2cbe3SMel Gorman } 143504f2cbe3SMel Gorman WARN_ON_ONCE(1); 143604f2cbe3SMel Gorman } 143704f2cbe3SMel Gorman 14382fc39cecSAdam Litke return -PTR_ERR(new_page); 14391e8f889bSDavid Gibson } 14401e8f889bSDavid Gibson 14411e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 14429de455b2SAtsushi Nemoto copy_huge_page(new_page, old_page, address, vma); 14430ed361deSNick Piggin __SetPageUptodate(new_page); 14441e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 14451e8f889bSDavid Gibson 14461e8f889bSDavid Gibson ptep = huge_pte_offset(mm, address & HPAGE_MASK); 14477f2e9525SGerald Schaefer if (likely(pte_same(huge_ptep_get(ptep), pte))) { 14481e8f889bSDavid Gibson /* Break COW */ 14498fe627ecSGerald Schaefer huge_ptep_clear_flush(vma, address, ptep); 14501e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, 14511e8f889bSDavid Gibson make_huge_pte(vma, new_page, 1)); 14521e8f889bSDavid Gibson /* Make the old page be freed below */ 14531e8f889bSDavid Gibson new_page = old_page; 14541e8f889bSDavid Gibson } 14551e8f889bSDavid Gibson page_cache_release(new_page); 14561e8f889bSDavid Gibson page_cache_release(old_page); 145783c54070SNick Piggin return 0; 14581e8f889bSDavid Gibson } 14591e8f889bSDavid Gibson 146004f2cbe3SMel Gorman /* Return the pagecache page at a given address within a VMA */ 146104f2cbe3SMel Gorman static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma, 146204f2cbe3SMel Gorman unsigned long address) 146304f2cbe3SMel Gorman { 146404f2cbe3SMel Gorman struct address_space *mapping; 1465e7c4b0bfSAndy Whitcroft pgoff_t idx; 146604f2cbe3SMel Gorman 146704f2cbe3SMel Gorman mapping = vma->vm_file->f_mapping; 1468a858f7b2SJohannes Weiner idx = vma_hugecache_offset(vma, address); 146904f2cbe3SMel Gorman 147004f2cbe3SMel Gorman return find_lock_page(mapping, idx); 147104f2cbe3SMel Gorman } 147204f2cbe3SMel Gorman 1473a1ed3ddaSRobert P. J. 
Day static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 14741e8f889bSDavid Gibson unsigned long address, pte_t *ptep, int write_access) 1475ac9b9c66SHugh Dickins { 1476ac9b9c66SHugh Dickins int ret = VM_FAULT_SIGBUS; 1477e7c4b0bfSAndy Whitcroft pgoff_t idx; 14784c887265SAdam Litke unsigned long size; 14794c887265SAdam Litke struct page *page; 14804c887265SAdam Litke struct address_space *mapping; 14811e8f889bSDavid Gibson pte_t new_pte; 14824c887265SAdam Litke 148304f2cbe3SMel Gorman /* 148404f2cbe3SMel Gorman * Currently, we are forced to kill the process in the event the 148504f2cbe3SMel Gorman * original mapper has unmapped pages from the child due to a failed 148604f2cbe3SMel Gorman * COW. Warn that such a situation has occured as it may not be obvious 148704f2cbe3SMel Gorman */ 148804f2cbe3SMel Gorman if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 148904f2cbe3SMel Gorman printk(KERN_WARNING 149004f2cbe3SMel Gorman "PID %d killed due to inadequate hugepage pool\n", 149104f2cbe3SMel Gorman current->pid); 149204f2cbe3SMel Gorman return ret; 149304f2cbe3SMel Gorman } 149404f2cbe3SMel Gorman 14954c887265SAdam Litke mapping = vma->vm_file->f_mapping; 1496a858f7b2SJohannes Weiner idx = vma_hugecache_offset(vma, address); 14974c887265SAdam Litke 14984c887265SAdam Litke /* 14994c887265SAdam Litke * Use page lock to guard against racing truncation 15004c887265SAdam Litke * before we get page_table_lock. 15014c887265SAdam Litke */ 15026bda666aSChristoph Lameter retry: 15036bda666aSChristoph Lameter page = find_lock_page(mapping, idx); 15046bda666aSChristoph Lameter if (!page) { 1505ebed4bfcSHugh Dickins size = i_size_read(mapping->host) >> HPAGE_SHIFT; 1506ebed4bfcSHugh Dickins if (idx >= size) 1507ebed4bfcSHugh Dickins goto out; 150804f2cbe3SMel Gorman page = alloc_huge_page(vma, address, 0); 15092fc39cecSAdam Litke if (IS_ERR(page)) { 15102fc39cecSAdam Litke ret = -PTR_ERR(page); 15116bda666aSChristoph Lameter goto out; 15126bda666aSChristoph Lameter } 151379ac6ba4SDavid Gibson clear_huge_page(page, address); 15140ed361deSNick Piggin __SetPageUptodate(page); 1515ac9b9c66SHugh Dickins 15166bda666aSChristoph Lameter if (vma->vm_flags & VM_SHARED) { 15176bda666aSChristoph Lameter int err; 151845c682a6SKen Chen struct inode *inode = mapping->host; 15196bda666aSChristoph Lameter 15206bda666aSChristoph Lameter err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); 15216bda666aSChristoph Lameter if (err) { 15226bda666aSChristoph Lameter put_page(page); 15236bda666aSChristoph Lameter if (err == -EEXIST) 15246bda666aSChristoph Lameter goto retry; 15256bda666aSChristoph Lameter goto out; 15266bda666aSChristoph Lameter } 152745c682a6SKen Chen 152845c682a6SKen Chen spin_lock(&inode->i_lock); 152945c682a6SKen Chen inode->i_blocks += BLOCKS_PER_HUGEPAGE; 153045c682a6SKen Chen spin_unlock(&inode->i_lock); 15316bda666aSChristoph Lameter } else 15326bda666aSChristoph Lameter lock_page(page); 15336bda666aSChristoph Lameter } 15341e8f889bSDavid Gibson 1535ac9b9c66SHugh Dickins spin_lock(&mm->page_table_lock); 15364c887265SAdam Litke size = i_size_read(mapping->host) >> HPAGE_SHIFT; 15374c887265SAdam Litke if (idx >= size) 15384c887265SAdam Litke goto backout; 15394c887265SAdam Litke 154083c54070SNick Piggin ret = 0; 15417f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) 15424c887265SAdam Litke goto backout; 15434c887265SAdam Litke 15441e8f889bSDavid Gibson new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 15451e8f889bSDavid Gibson && (vma->vm_flags & 
VM_SHARED))); 15461e8f889bSDavid Gibson set_huge_pte_at(mm, address, ptep, new_pte); 15471e8f889bSDavid Gibson 15481e8f889bSDavid Gibson if (write_access && !(vma->vm_flags & VM_SHARED)) { 15491e8f889bSDavid Gibson /* Optimization, do the COW without a second fault */ 155004f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); 15511e8f889bSDavid Gibson } 15521e8f889bSDavid Gibson 1553ac9b9c66SHugh Dickins spin_unlock(&mm->page_table_lock); 15544c887265SAdam Litke unlock_page(page); 15554c887265SAdam Litke out: 1556ac9b9c66SHugh Dickins return ret; 15574c887265SAdam Litke 15584c887265SAdam Litke backout: 15594c887265SAdam Litke spin_unlock(&mm->page_table_lock); 15604c887265SAdam Litke unlock_page(page); 15614c887265SAdam Litke put_page(page); 15624c887265SAdam Litke goto out; 1563ac9b9c66SHugh Dickins } 1564ac9b9c66SHugh Dickins 156586e5216fSAdam Litke int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 156686e5216fSAdam Litke unsigned long address, int write_access) 156786e5216fSAdam Litke { 156886e5216fSAdam Litke pte_t *ptep; 156986e5216fSAdam Litke pte_t entry; 15701e8f889bSDavid Gibson int ret; 15713935baa9SDavid Gibson static DEFINE_MUTEX(hugetlb_instantiation_mutex); 157286e5216fSAdam Litke 157386e5216fSAdam Litke ptep = huge_pte_alloc(mm, address); 157486e5216fSAdam Litke if (!ptep) 157586e5216fSAdam Litke return VM_FAULT_OOM; 157686e5216fSAdam Litke 15773935baa9SDavid Gibson /* 15783935baa9SDavid Gibson * Serialize hugepage allocation and instantiation, so that we don't 15793935baa9SDavid Gibson * get spurious allocation failures if two CPUs race to instantiate 15803935baa9SDavid Gibson * the same page in the page cache. 15813935baa9SDavid Gibson */ 15823935baa9SDavid Gibson mutex_lock(&hugetlb_instantiation_mutex); 15837f2e9525SGerald Schaefer entry = huge_ptep_get(ptep); 15847f2e9525SGerald Schaefer if (huge_pte_none(entry)) { 15853935baa9SDavid Gibson ret = hugetlb_no_page(mm, vma, address, ptep, write_access); 15863935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 15873935baa9SDavid Gibson return ret; 15883935baa9SDavid Gibson } 158986e5216fSAdam Litke 159083c54070SNick Piggin ret = 0; 15911e8f889bSDavid Gibson 15921e8f889bSDavid Gibson spin_lock(&mm->page_table_lock); 15931e8f889bSDavid Gibson /* Check for a racing update before calling hugetlb_cow */ 15947f2e9525SGerald Schaefer if (likely(pte_same(entry, huge_ptep_get(ptep)))) 159504f2cbe3SMel Gorman if (write_access && !pte_write(entry)) { 159604f2cbe3SMel Gorman struct page *page; 159704f2cbe3SMel Gorman page = hugetlbfs_pagecache_page(vma, address); 159804f2cbe3SMel Gorman ret = hugetlb_cow(mm, vma, address, ptep, entry, page); 159904f2cbe3SMel Gorman if (page) { 160004f2cbe3SMel Gorman unlock_page(page); 160104f2cbe3SMel Gorman put_page(page); 160204f2cbe3SMel Gorman } 160304f2cbe3SMel Gorman } 16041e8f889bSDavid Gibson spin_unlock(&mm->page_table_lock); 16053935baa9SDavid Gibson mutex_unlock(&hugetlb_instantiation_mutex); 16061e8f889bSDavid Gibson 16071e8f889bSDavid Gibson return ret; 160886e5216fSAdam Litke } 160986e5216fSAdam Litke 161063551ae0SDavid Gibson int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 161163551ae0SDavid Gibson struct page **pages, struct vm_area_struct **vmas, 16125b23dbe8SAdam Litke unsigned long *position, int *length, int i, 16135b23dbe8SAdam Litke int write) 161463551ae0SDavid Gibson { 1615d5d4b0aaSChen, Kenneth W unsigned long pfn_offset; 1616d5d4b0aaSChen, Kenneth W unsigned long vaddr = *position; 
161763551ae0SDavid Gibson int remainder = *length; 161863551ae0SDavid Gibson 16191c59827dSHugh Dickins spin_lock(&mm->page_table_lock); 162063551ae0SDavid Gibson while (vaddr < vma->vm_end && remainder) { 162163551ae0SDavid Gibson pte_t *pte; 162263551ae0SDavid Gibson struct page *page; 162363551ae0SDavid Gibson 16244c887265SAdam Litke /* 16254c887265SAdam Litke * Some archs (sparc64, sh*) have multiple pte_ts to 16264c887265SAdam Litke * each hugepage. We have to make * sure we get the 16274c887265SAdam Litke * first, for the page indexing below to work. 16284c887265SAdam Litke */ 162963551ae0SDavid Gibson pte = huge_pte_offset(mm, vaddr & HPAGE_MASK); 163063551ae0SDavid Gibson 16317f2e9525SGerald Schaefer if (!pte || huge_pte_none(huge_ptep_get(pte)) || 16327f2e9525SGerald Schaefer (write && !pte_write(huge_ptep_get(pte)))) { 16334c887265SAdam Litke int ret; 16344c887265SAdam Litke 16354c887265SAdam Litke spin_unlock(&mm->page_table_lock); 16365b23dbe8SAdam Litke ret = hugetlb_fault(mm, vma, vaddr, write); 16374c887265SAdam Litke spin_lock(&mm->page_table_lock); 1638a89182c7SAdam Litke if (!(ret & VM_FAULT_ERROR)) 16394c887265SAdam Litke continue; 16404c887265SAdam Litke 16411c59827dSHugh Dickins remainder = 0; 16421c59827dSHugh Dickins if (!i) 16431c59827dSHugh Dickins i = -EFAULT; 16441c59827dSHugh Dickins break; 16451c59827dSHugh Dickins } 164663551ae0SDavid Gibson 1647d5d4b0aaSChen, Kenneth W pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT; 16487f2e9525SGerald Schaefer page = pte_page(huge_ptep_get(pte)); 1649d5d4b0aaSChen, Kenneth W same_page: 1650d6692183SChen, Kenneth W if (pages) { 165163551ae0SDavid Gibson get_page(page); 1652d5d4b0aaSChen, Kenneth W pages[i] = page + pfn_offset; 1653d6692183SChen, Kenneth W } 165463551ae0SDavid Gibson 165563551ae0SDavid Gibson if (vmas) 165663551ae0SDavid Gibson vmas[i] = vma; 165763551ae0SDavid Gibson 165863551ae0SDavid Gibson vaddr += PAGE_SIZE; 1659d5d4b0aaSChen, Kenneth W ++pfn_offset; 166063551ae0SDavid Gibson --remainder; 166163551ae0SDavid Gibson ++i; 1662d5d4b0aaSChen, Kenneth W if (vaddr < vma->vm_end && remainder && 1663d5d4b0aaSChen, Kenneth W pfn_offset < HPAGE_SIZE/PAGE_SIZE) { 1664d5d4b0aaSChen, Kenneth W /* 1665d5d4b0aaSChen, Kenneth W * We use pfn_offset to avoid touching the pageframes 1666d5d4b0aaSChen, Kenneth W * of this compound page. 
1667d5d4b0aaSChen, Kenneth W */ 1668d5d4b0aaSChen, Kenneth W goto same_page; 1669d5d4b0aaSChen, Kenneth W } 167063551ae0SDavid Gibson } 16711c59827dSHugh Dickins spin_unlock(&mm->page_table_lock); 167263551ae0SDavid Gibson *length = remainder; 167363551ae0SDavid Gibson *position = vaddr; 167463551ae0SDavid Gibson 167563551ae0SDavid Gibson return i; 167663551ae0SDavid Gibson } 16778f860591SZhang, Yanmin 16788f860591SZhang, Yanmin void hugetlb_change_protection(struct vm_area_struct *vma, 16798f860591SZhang, Yanmin unsigned long address, unsigned long end, pgprot_t newprot) 16808f860591SZhang, Yanmin { 16818f860591SZhang, Yanmin struct mm_struct *mm = vma->vm_mm; 16828f860591SZhang, Yanmin unsigned long start = address; 16838f860591SZhang, Yanmin pte_t *ptep; 16848f860591SZhang, Yanmin pte_t pte; 16858f860591SZhang, Yanmin 16868f860591SZhang, Yanmin BUG_ON(address >= end); 16878f860591SZhang, Yanmin flush_cache_range(vma, address, end); 16888f860591SZhang, Yanmin 168939dde65cSChen, Kenneth W spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 16908f860591SZhang, Yanmin spin_lock(&mm->page_table_lock); 16918f860591SZhang, Yanmin for (; address < end; address += HPAGE_SIZE) { 16928f860591SZhang, Yanmin ptep = huge_pte_offset(mm, address); 16938f860591SZhang, Yanmin if (!ptep) 16948f860591SZhang, Yanmin continue; 169539dde65cSChen, Kenneth W if (huge_pmd_unshare(mm, &address, ptep)) 169639dde65cSChen, Kenneth W continue; 16977f2e9525SGerald Schaefer if (!huge_pte_none(huge_ptep_get(ptep))) { 16988f860591SZhang, Yanmin pte = huge_ptep_get_and_clear(mm, address, ptep); 16998f860591SZhang, Yanmin pte = pte_mkhuge(pte_modify(pte, newprot)); 17008f860591SZhang, Yanmin set_huge_pte_at(mm, address, ptep, pte); 17018f860591SZhang, Yanmin } 17028f860591SZhang, Yanmin } 17038f860591SZhang, Yanmin spin_unlock(&mm->page_table_lock); 170439dde65cSChen, Kenneth W spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 17058f860591SZhang, Yanmin 17068f860591SZhang, Yanmin flush_tlb_range(vma, start, end); 17078f860591SZhang, Yanmin } 17088f860591SZhang, Yanmin 1709a1e78772SMel Gorman int hugetlb_reserve_pages(struct inode *inode, 1710a1e78772SMel Gorman long from, long to, 1711a1e78772SMel Gorman struct vm_area_struct *vma) 1712e4e574b7SAdam Litke { 1713e4e574b7SAdam Litke long ret, chg; 1714e4e574b7SAdam Litke 1715c37f9fb1SAndy Whitcroft if (vma && vma->vm_flags & VM_NORESERVE) 1716c37f9fb1SAndy Whitcroft return 0; 1717c37f9fb1SAndy Whitcroft 1718a1e78772SMel Gorman /* 1719a1e78772SMel Gorman * Shared mappings base their reservation on the number of pages that 1720a1e78772SMel Gorman * are already allocated on behalf of the file. Private mappings need 1721a1e78772SMel Gorman * to reserve the full area even if read-only as mprotect() may be 1722a1e78772SMel Gorman * called to make the mapping read-write. 
Assume !vma is a shm mapping 1723a1e78772SMel Gorman */ 1724a1e78772SMel Gorman if (!vma || vma->vm_flags & VM_SHARED) 1725e4e574b7SAdam Litke chg = region_chg(&inode->i_mapping->private_list, from, to); 1726a1e78772SMel Gorman else { 172784afd99bSAndy Whitcroft struct resv_map *resv_map = resv_map_alloc(); 172884afd99bSAndy Whitcroft if (!resv_map) 172984afd99bSAndy Whitcroft return -ENOMEM; 173084afd99bSAndy Whitcroft 1731a1e78772SMel Gorman chg = to - from; 173284afd99bSAndy Whitcroft 173384afd99bSAndy Whitcroft set_vma_resv_map(vma, resv_map); 173404f2cbe3SMel Gorman set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 1735a1e78772SMel Gorman } 1736a1e78772SMel Gorman 1737e4e574b7SAdam Litke if (chg < 0) 1738e4e574b7SAdam Litke return chg; 17398a630112SKen Chen 174090d8b7e6SAdam Litke if (hugetlb_get_quota(inode->i_mapping, chg)) 174190d8b7e6SAdam Litke return -ENOSPC; 1742a43a8c39SChen, Kenneth W ret = hugetlb_acct_memory(chg); 174368842c9bSKen Chen if (ret < 0) { 174468842c9bSKen Chen hugetlb_put_quota(inode->i_mapping, chg); 1745a43a8c39SChen, Kenneth W return ret; 174668842c9bSKen Chen } 1747a1e78772SMel Gorman if (!vma || vma->vm_flags & VM_SHARED) 1748a43a8c39SChen, Kenneth W region_add(&inode->i_mapping->private_list, from, to); 1749a43a8c39SChen, Kenneth W return 0; 1750a43a8c39SChen, Kenneth W } 1751a43a8c39SChen, Kenneth W 1752a43a8c39SChen, Kenneth W void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 1753a43a8c39SChen, Kenneth W { 1754a43a8c39SChen, Kenneth W long chg = region_truncate(&inode->i_mapping->private_list, offset); 175545c682a6SKen Chen 175645c682a6SKen Chen spin_lock(&inode->i_lock); 175745c682a6SKen Chen inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed; 175845c682a6SKen Chen spin_unlock(&inode->i_lock); 175945c682a6SKen Chen 176090d8b7e6SAdam Litke hugetlb_put_quota(inode->i_mapping, (chg - freed)); 176190d8b7e6SAdam Litke hugetlb_acct_memory(-(chg - freed)); 1762a43a8c39SChen, Kenneth W } 1763